/* (gitweb extraction artifact removed: page header for
   thirdparty/gcc.git gcc/dwarf2cfi.c.)  */
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "hash-set.h"
28 #include "vec.h"
29 #include "input.h"
30 #include "alias.h"
31 #include "symtab.h"
32 #include "inchash.h"
33 #include "tree.h"
34 #include "stor-layout.h"
35 #include "hard-reg-set.h"
36 #include "function.h"
37 #include "cfgbuild.h"
38 #include "dwarf2.h"
39 #include "dwarf2out.h"
40 #include "dwarf2asm.h"
41 #include "ggc.h"
42 #include "hash-table.h"
43 #include "tm_p.h"
44 #include "target.h"
45 #include "common/common-target.h"
46 #include "tree-pass.h"
47
48 #include "except.h" /* expand_builtin_dwarf_sp_column */
49 #include "hashtab.h"
50 #include "statistics.h"
51 #include "insn-config.h"
52 #include "expmed.h"
53 #include "dojump.h"
54 #include "explow.h"
55 #include "calls.h"
56 #include "emit-rtl.h"
57 #include "varasm.h"
58 #include "stmt.h"
59 #include "expr.h" /* init_return_column_size */
60 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
61 #include "output.h" /* asm_out_file */
62 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
63
64
65 /* ??? Poison these here until it can be done generically. They've been
66 totally replaced in this file; make sure it stays that way. */
67 #undef DWARF2_UNWIND_INFO
68 #undef DWARF2_FRAME_INFO
69 #if (GCC_VERSION >= 3000)
70 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
71 #endif
72
73 #ifndef INCOMING_RETURN_ADDR_RTX
74 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
75 #endif
76
77 /* Maximum size (in bytes) of an artificially generated label. */
78 #define MAX_ARTIFICIAL_LABEL_BYTES 30
79 \f
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column number; a NULL entry means the column is not
     saved in this row.  */
  cfi_vec reg_save;
} dw_cfi_row;

/* The caller's ORIG_REG is saved in SAVED_IN_REG.  Both are REGs
   (ORIG_REG may also be PC_RTX, standing for the return address).  */
typedef struct GTY(()) reg_saved_in_data_struct {
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;
98
99
100 /* Since we no longer have a proper CFG, we're going to create a facsimile
101 of one on the fly while processing the frame-related insns.
102
103 We create dw_trace_info structures for each extended basic block beginning
104 and ending at a "save point". Save points are labels, barriers, certain
105 notes, and of course the beginning and end of the function.
106
107 As we encounter control transfer insns, we propagate the "current"
108 row state across the edges to the starts of traces. When checking is
109 enabled, we validate that we propagate the same data from all sources.
110
111 All traces are members of the TRACE_INFO array, in the order in which
112 they appear in the instruction stream.
113
114 All save points are present in the TRACE_INDEX hash, mapping the insn
115 starting a trace to the dw_trace_info describing the trace. */
116
/* One extended basic block of the pseudo-CFG, running between two
   "save points" (see save_point_p).  */
typedef struct
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;

/* Convenience alias; trace_work_list stores pointers into trace_info.  */
typedef dw_trace_info *dw_trace_info_ref;
175
/* Hashtable helpers.  Traces are keyed by their head insn; the UID of
   that insn serves as the hash value.  */

struct trace_info_hasher : typed_noop_remove <dw_trace_info>
{
  typedef dw_trace_info *value_type;
  typedef dw_trace_info *compare_type;
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

/* Hash a trace by the UID of its head insn.  */
inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

/* Two traces are the same iff they start at the same insn.  */
inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
197
198
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info_ref> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* NOTE(review): appears to record where the return column is saved per
   the CIE, mirroring a regs_saved_in_regs entry — confirm against its
   setter elsewhere in this file.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter used by dwarf2out_cfi_label to number LCFI labels.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

typedef struct {
  /* The register (or PC_RTX for the return address) being saved.  */
  rtx reg;
  /* The register it is saved into, or NULL if saved to memory.  */
  rtx saved_reg;
  /* When SAVED_REG is NULL, the save location's offset from the CFA.  */
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
250 \f
251 /* Hook used by __throw. */
252
253 rtx
254 expand_builtin_dwarf_sp_column (void)
255 {
256 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
257 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
258 }
259
260 /* MEM is a memory reference for the register size table, each element of
261 which has mode MODE. Initialize column C as a return address column. */
262
263 static void
264 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
265 {
266 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
267 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
268 emit_move_insn (adjust_address (mem, mode, offset),
269 gen_int_mode (size, mode));
270 }
271
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

typedef struct
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  Guards against handling a
     register twice when it appears both inside a span and alone.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

} init_one_dwarf_reg_state;
286
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO processed unconditionally, before any early return,
     so the caller never hands it to us again.  */
  init_state->processed_regno[regno] = true;

  /* Registers outside the table get no slot at all.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      /* A VOIDmode return column carries no size to record.  */
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* A negative slot offset means there is nowhere to store the size.  */
  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
323
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  /* Each piece of the span is sized as a register in its own
	     right.  */
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* Ensure the return column is sized even when no hard register maps
     to it.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
377
378 \f
379 static dw_trace_info *
380 get_trace_info (rtx_insn *insn)
381 {
382 dw_trace_info dummy;
383 dummy.head = insn;
384 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
385 }
386
387 static bool
388 save_point_p (rtx_insn *insn)
389 {
390 /* Labels, except those that are really jump tables. */
391 if (LABEL_P (insn))
392 return inside_basic_block_p (insn);
393
394 /* We split traces at the prologue/epilogue notes because those
395 are points at which the unwind info is usually stable. This
396 makes it easier to find spots with identical unwind info so
397 that we can use remember/restore_state opcodes. */
398 if (NOTE_P (insn))
399 switch (NOTE_KIND (insn))
400 {
401 case NOTE_INSN_PROLOGUE_END:
402 case NOTE_INSN_EPILOGUE_BEG:
403 return true;
404 }
405
406 return false;
407 }
408
409 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
410
411 static inline HOST_WIDE_INT
412 div_data_align (HOST_WIDE_INT off)
413 {
414 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
415 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
416 return r;
417 }
418
419 /* Return true if we need a signed version of a given opcode
420 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
421
422 static inline bool
423 need_data_align_sf_opcode (HOST_WIDE_INT off)
424 {
425 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
426 }
427
428 /* Return a pointer to a newly allocated Call Frame Instruction. */
429
430 static inline dw_cfi_ref
431 new_cfi (void)
432 {
433 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
434
435 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
436 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
437
438 return cfi;
439 }
440
441 /* Return a newly allocated CFI row, with no defined data. */
442
443 static dw_cfi_row *
444 new_cfi_row (void)
445 {
446 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
447
448 row->cfa.reg = INVALID_REGNUM;
449
450 return row;
451 }
452
453 /* Return a copy of an existing CFI row. */
454
455 static dw_cfi_row *
456 copy_cfi_row (dw_cfi_row *src)
457 {
458 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
459
460 *dst = *src;
461 dst->reg_save = vec_safe_copy (src->reg_save);
462
463 return dst;
464 }
465
466 /* Generate a new label for the CFI info to refer to. */
467
468 static char *
469 dwarf2out_cfi_label (void)
470 {
471 int num = dwarf2out_cfi_label_num++;
472 char label[20];
473
474 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
475
476 return xstrdup (label);
477 }
478
479 /* Add CFI either to the current insn stream or to a vector, or both. */
480
481 static void
482 add_cfi (dw_cfi_ref cfi)
483 {
484 any_cfis_emitted = true;
485
486 if (add_cfi_insn != NULL)
487 {
488 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
489 NOTE_CFI (add_cfi_insn) = cfi;
490 }
491
492 if (add_cfi_vec != NULL)
493 vec_safe_push (*add_cfi_vec, cfi);
494 }
495
496 static void
497 add_cfi_args_size (HOST_WIDE_INT size)
498 {
499 dw_cfi_ref cfi = new_cfi ();
500
501 /* While we can occasionally have args_size < 0 internally, this state
502 should not persist at a point we actually need an opcode. */
503 gcc_assert (size >= 0);
504
505 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
506 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
507
508 add_cfi (cfi);
509 }
510
511 static void
512 add_cfi_restore (unsigned reg)
513 {
514 dw_cfi_ref cfi = new_cfi ();
515
516 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
517 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
518
519 add_cfi (cfi);
520 }
521
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Lazily extend the vector so COLUMN is a valid index; the new
     tail slots are cleared to NULL.  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
532
533 /* This function fills in aa dw_cfa_location structure from a dwarf location
534 descriptor sequence. */
535
536 static void
537 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
538 {
539 struct dw_loc_descr_node *ptr;
540 cfa->offset = 0;
541 cfa->base_offset = 0;
542 cfa->indirect = 0;
543 cfa->reg = -1;
544
545 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
546 {
547 enum dwarf_location_atom op = ptr->dw_loc_opc;
548
549 switch (op)
550 {
551 case DW_OP_reg0:
552 case DW_OP_reg1:
553 case DW_OP_reg2:
554 case DW_OP_reg3:
555 case DW_OP_reg4:
556 case DW_OP_reg5:
557 case DW_OP_reg6:
558 case DW_OP_reg7:
559 case DW_OP_reg8:
560 case DW_OP_reg9:
561 case DW_OP_reg10:
562 case DW_OP_reg11:
563 case DW_OP_reg12:
564 case DW_OP_reg13:
565 case DW_OP_reg14:
566 case DW_OP_reg15:
567 case DW_OP_reg16:
568 case DW_OP_reg17:
569 case DW_OP_reg18:
570 case DW_OP_reg19:
571 case DW_OP_reg20:
572 case DW_OP_reg21:
573 case DW_OP_reg22:
574 case DW_OP_reg23:
575 case DW_OP_reg24:
576 case DW_OP_reg25:
577 case DW_OP_reg26:
578 case DW_OP_reg27:
579 case DW_OP_reg28:
580 case DW_OP_reg29:
581 case DW_OP_reg30:
582 case DW_OP_reg31:
583 cfa->reg = op - DW_OP_reg0;
584 break;
585 case DW_OP_regx:
586 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
587 break;
588 case DW_OP_breg0:
589 case DW_OP_breg1:
590 case DW_OP_breg2:
591 case DW_OP_breg3:
592 case DW_OP_breg4:
593 case DW_OP_breg5:
594 case DW_OP_breg6:
595 case DW_OP_breg7:
596 case DW_OP_breg8:
597 case DW_OP_breg9:
598 case DW_OP_breg10:
599 case DW_OP_breg11:
600 case DW_OP_breg12:
601 case DW_OP_breg13:
602 case DW_OP_breg14:
603 case DW_OP_breg15:
604 case DW_OP_breg16:
605 case DW_OP_breg17:
606 case DW_OP_breg18:
607 case DW_OP_breg19:
608 case DW_OP_breg20:
609 case DW_OP_breg21:
610 case DW_OP_breg22:
611 case DW_OP_breg23:
612 case DW_OP_breg24:
613 case DW_OP_breg25:
614 case DW_OP_breg26:
615 case DW_OP_breg27:
616 case DW_OP_breg28:
617 case DW_OP_breg29:
618 case DW_OP_breg30:
619 case DW_OP_breg31:
620 cfa->reg = op - DW_OP_breg0;
621 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
622 break;
623 case DW_OP_bregx:
624 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
625 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
626 break;
627 case DW_OP_deref:
628 cfa->indirect = 1;
629 break;
630 case DW_OP_plus_uconst:
631 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
632 break;
633 default:
634 gcc_unreachable ();
635 }
636 }
637 }
638
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Offset changes; the register stays the same.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Register changes; the offset stays the same.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the location expression back into register/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of remember/restore is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Opcodes that do not affect the CFA are ignored.  */
      break;
    }
}
679
680 /* Determine if two dw_cfa_location structures define the same data. */
681
682 bool
683 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
684 {
685 return (loc1->reg == loc2->reg
686 && loc1->offset == loc2->offset
687 && loc1->indirect == loc2->indirect
688 && (loc1->indirect == 0
689 || loc1->base_offset == loc2->base_offset));
690 }
691
/* Determine if two CFI operands are identical.  T names which member
   of the dw_cfi_oprnd union is active in both A and B.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Cheap pointer identity first, then full string compare.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
713
714 /* Determine if two CFI entries are identical. */
715
716 static bool
717 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
718 {
719 enum dwarf_call_frame_info opc;
720
721 /* Make things easier for our callers, including missing operands. */
722 if (a == b)
723 return true;
724 if (a == NULL || b == NULL)
725 return false;
726
727 /* Obviously, the opcodes must match. */
728 opc = a->dw_cfi_opc;
729 if (opc != b->dw_cfi_opc)
730 return false;
731
732 /* Compare the two operands, re-using the type of the operands as
733 already exposed elsewhere. */
734 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
735 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
736 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
737 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
738 }
739
740 /* Determine if two CFI_ROW structures are identical. */
741
742 static bool
743 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
744 {
745 size_t i, n_a, n_b, n_max;
746
747 if (a->cfa_cfi)
748 {
749 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
750 return false;
751 }
752 else if (!cfa_equal_p (&a->cfa, &b->cfa))
753 return false;
754
755 n_a = vec_safe_length (a->reg_save);
756 n_b = vec_safe_length (b->reg_save);
757 n_max = MAX (n_a, n_b);
758
759 for (i = 0; i < n_max; ++i)
760 {
761 dw_cfi_ref r_a = NULL, r_b = NULL;
762
763 if (i < n_a)
764 r_a = (*a->reg_save)[i];
765 if (i < n_b)
766 r_b = (*b->reg_save)[i];
767
768 if (!cfi_equal_p (r_a, r_b))
769 return false;
770 }
771
772 return true;
773 }
774
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  This requires the old CFA to have
	 been valid (old_cfa->reg != INVALID_REGNUM).  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
841
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the trace's store-tracking offset in sync when the CFA is
     based on the same register the saves go through.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Remember the expression form, if that is how the CFA is now
	 defined; otherwise clear any stale expression.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
862
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved in memory at OFFSET from the CFA.  Pick the most compact
	 opcode the offset sign and column number allow.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
913
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
948
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn in the trace: record it and seed the delayed
	 args_size values; no opcode is needed yet.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
975
/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  /* Only hard registers have a DWARF column.  */
  gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
  return DWARF_FRAME_REGNUM (REGNO (reg));
}
986
987 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
988
989 static bool
990 compare_reg_or_pc (rtx x, rtx y)
991 {
992 if (REG_P (x) && REG_P (y))
993 return REGNO (x) == REGNO (y);
994 return x == y;
995 }
996
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* Update or remove an existing entry for SRC, if there is one.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Nothing to add for a deletion with no existing entry.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
1022
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
	/* Overwrite the earlier save in place.  */
	*q = e;
	return;
      }

  queued_reg_saves.safe_push (e);
}
1044
/* Output all the entries in QUEUED_REG_SAVES.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      /* Keep the reg-saved-in-reg inverse map in sync (a NULL
	 saved_reg deletes any stale entry for q->reg).  */
      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
1072
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      /* The queued register itself is modified.  */
      if (modified_in_p (q->reg, insn))
	return true;

      /* A register that q->reg was previously recorded as saved in
	 is modified while a new save of q->reg is still queued.  */
      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
1100
1101 /* What register, if any, is currently saved in REG? */
1102
1103 static rtx
1104 reg_saved_in (rtx reg)
1105 {
1106 unsigned int regn = REGNO (reg);
1107 queued_reg_save *q;
1108 reg_saved_in_data *rir;
1109 size_t i;
1110
1111 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1112 if (q->saved_reg && regn == REGNO (q->saved_reg))
1113 return q->reg;
1114
1115 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1116 if (regn == REGNO (rir->saved_in_reg))
1117 return rir->orig_reg;
1118
1119 return NULL_RTX;
1120 }
1121
1122 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1123
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* PAT describes the new CFA value in full; discard whatever rule was
     in effect before.  */
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel an outer constant displacement: (plus BASE CONST).  */
  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  /* A MEM means the CFA value must be loaded from memory; record the
     inner address as an indirect rule, again peeling a constant.  */
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
        {
          cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
          pat = XEXP (pat, 0);
        }
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
1149
1150 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1151
static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* DEST = CFA-reg + CONST: the new register is CONST closer to the
         CFA, so the recorded offset shrinks by CONST.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy: only the CFA register changes.  */
      break;

    default:
      gcc_unreachable ();
    }

  /* In both cases DEST becomes the register the CFA is computed from.
     REG_CFA_ADJUST_CFA never applies to an indirect CFA rule.  */
  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}
1178
1179 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1180
1181 static void
1182 dwarf2out_frame_debug_cfa_offset (rtx set)
1183 {
1184 HOST_WIDE_INT offset;
1185 rtx src, addr, span;
1186 unsigned int sregno;
1187
1188 src = XEXP (set, 1);
1189 addr = XEXP (set, 0);
1190 gcc_assert (MEM_P (addr));
1191 addr = XEXP (addr, 0);
1192
1193 /* As documented, only consider extremely simple addresses. */
1194 switch (GET_CODE (addr))
1195 {
1196 case REG:
1197 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1198 offset = -cur_cfa->offset;
1199 break;
1200 case PLUS:
1201 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1202 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1203 break;
1204 default:
1205 gcc_unreachable ();
1206 }
1207
1208 if (src == pc_rtx)
1209 {
1210 span = NULL;
1211 sregno = DWARF_FRAME_RETURN_COLUMN;
1212 }
1213 else
1214 {
1215 span = targetm.dwarf_register_span (src);
1216 sregno = dwf_regno (src);
1217 }
1218
1219 /* ??? We'd like to use queue_reg_save, but we need to come up with
1220 a different flushing heuristic for epilogues. */
1221 if (!span)
1222 reg_save (sregno, INVALID_REGNUM, offset);
1223 else
1224 {
1225 /* We have a PARALLEL describing where the contents of SRC live.
1226 Adjust the offset for each piece of the PARALLEL. */
1227 HOST_WIDE_INT span_offset = offset;
1228
1229 gcc_assert (GET_CODE (span) == PARALLEL);
1230
1231 const int par_len = XVECLEN (span, 0);
1232 for (int par_index = 0; par_index < par_len; par_index++)
1233 {
1234 rtx elem = XVECEXP (span, 0, par_index);
1235 sregno = dwf_regno (src);
1236 reg_save (sregno, INVALID_REGNUM, span_offset);
1237 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1238 }
1239 }
1240 }
1241
1242 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1243
1244 static void
1245 dwarf2out_frame_debug_cfa_register (rtx set)
1246 {
1247 rtx src, dest;
1248 unsigned sregno, dregno;
1249
1250 src = XEXP (set, 1);
1251 dest = XEXP (set, 0);
1252
1253 record_reg_saved_in_reg (dest, src);
1254 if (src == pc_rtx)
1255 sregno = DWARF_FRAME_RETURN_COLUMN;
1256 else
1257 sregno = dwf_regno (src);
1258
1259 dregno = dwf_regno (dest);
1260
1261 /* ??? We'd like to use queue_reg_save, but we need to come up with
1262 a different flushing heuristic for epilogues. */
1263 reg_save (sregno, dregno, 0);
1264 }
1265
1266 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1267
static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  /* The note must describe a register stored into memory.  */
  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-register spans are not supported for expression saves.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Emit DW_CFA_expression: REGNO is saved at the location computed by
     the DWARF expression built from DEST's address.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
                          GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1297
1298 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1299
1300 static void
1301 dwarf2out_frame_debug_cfa_restore (rtx reg)
1302 {
1303 gcc_assert (REG_P (reg));
1304
1305 rtx span = targetm.dwarf_register_span (reg);
1306 if (!span)
1307 {
1308 unsigned int regno = dwf_regno (reg);
1309 add_cfi_restore (regno);
1310 update_row_reg_save (cur_row, regno, NULL);
1311 }
1312 else
1313 {
1314 /* We have a PARALLEL describing where the contents of REG live.
1315 Restore the register for each piece of the PARALLEL. */
1316 gcc_assert (GET_CODE (span) == PARALLEL);
1317
1318 const int par_len = XVECLEN (span, 0);
1319 for (int par_index = 0; par_index < par_len; par_index++)
1320 {
1321 reg = XVECEXP (span, 0, par_index);
1322 gcc_assert (REG_P (reg));
1323 unsigned int regno = dwf_regno (reg);
1324 add_cfi_restore (regno);
1325 update_row_reg_save (cur_row, regno, NULL);
1326 }
1327 }
1328 }
1329
1330 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1331 ??? Perhaps we should note in the CIE where windows are saved (instead of
1332 assuming 0(cfa)) and what registers are in the window. */
1333
static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  /* Emit a bare DW_CFA_GNU_window_save; the opcode takes no operands.  */
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
1342
1343 /* Record call frame debugging information for an expression EXPR,
1344 which either sets SP or FP (adjusting how we calculate the frame
1345 address) or saves a register to the stack or another register.
1346 LABEL indicates the address of EXPR.
1347
1348 This function encodes a state machine mapping rtxes to actions on
1349 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1350 users need not read the source code.
1351
1352 The High-Level Picture
1353
1354 Changes in the register we use to calculate the CFA: Currently we
1355 assume that if you copy the CFA register into another register, we
1356 should take the other one as the new CFA register; this seems to
1357 work pretty well. If it's wrong for some target, it's simple
1358 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1359
1360 Changes in the register we use for saving registers to the stack:
1361 This is usually SP, but not always. Again, we deduce that if you
1362 copy SP into another register (and SP is not the CFA register),
1363 then the new register is the one we will be using for register
1364 saves. This also seems to work.
1365
1366 Register saves: There's not much guesswork about this one; if
1367 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1368 register save, and the register used to calculate the destination
1369 had better be the one we think we're using for this purpose.
1370 It's also assumed that a copy from a call-saved register to another
1371 register is saving that register if RTX_FRAME_RELATED_P is set on
1372 that instruction. If the copy is from a call-saved register to
1373 the *same* register, that means that the register is now the same
1374 value as in the caller.
1375
1376 Except: If the register being saved is the CFA register, and the
1377 offset is nonzero, we are saving the CFA, so we assume we have to
1378 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1379 the intent is to save the value of SP from the previous frame.
1380
   In addition, if a register has previously been saved to a different
   register, we resolve later saves of the new register back to the
   original one: dwarf2out_frame_debug_expr rewrites the source via
   reg_saved_in before applying the rules below.

1384 Invariants / Summaries of Rules
1385
1386 cfa current rule for calculating the CFA. It usually
1387 consists of a register and an offset. This is
1388 actually stored in *cur_cfa, but abbreviated
1389 for the purposes of this documentation.
1390 cfa_store register used by prologue code to save things to the stack
1391 cfa_store.offset is the offset from the value of
1392 cfa_store.reg to the actual CFA
1393 cfa_temp register holding an integral value. cfa_temp.offset
1394 stores the value, which will be used to adjust the
1395 stack pointer. cfa_temp is also used like cfa_store,
1396 to track stores to the stack via fp or a temp reg.
1397
1398 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1399 with cfa.reg as the first operand changes the cfa.reg and its
1400 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1401 cfa_temp.offset.
1402
1403 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1404 expression yielding a constant. This sets cfa_temp.reg
1405 and cfa_temp.offset.
1406
1407 Rule 5: Create a new register cfa_store used to save items to the
1408 stack.
1409
1410 Rules 10-14: Save a register to the stack. Define offset as the
1411 difference of the original location and cfa_store's
1412 location (or cfa_temp's location if cfa_temp is used).
1413
1414 Rules 16-20: If AND operation happens on sp in prologue, we assume
1415 stack is realigned. We will use a group of DW_OP_XXX
1416 expressions to represent the location of the stored
1417 register instead of CFA+offset.
1418
1419 The Rules
1420
1421 "{a,b}" indicates a choice of a xor b.
1422 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1423
1424 Rule 1:
1425 (set <reg1> <reg2>:cfa.reg)
1426 effects: cfa.reg = <reg1>
1427 cfa.offset unchanged
1428 cfa_temp.reg = <reg1>
1429 cfa_temp.offset = cfa.offset
1430
1431 Rule 2:
1432 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1433 {<const_int>,<reg>:cfa_temp.reg}))
1434 effects: cfa.reg = sp if fp used
1435 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1436 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1437 if cfa_store.reg==sp
1438
1439 Rule 3:
1440 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1441 effects: cfa.reg = fp
1442 cfa_offset += +/- <const_int>
1443
1444 Rule 4:
1445 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1446 constraints: <reg1> != fp
1447 <reg1> != sp
1448 effects: cfa.reg = <reg1>
1449 cfa_temp.reg = <reg1>
1450 cfa_temp.offset = cfa.offset
1451
1452 Rule 5:
1453 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1454 constraints: <reg1> != fp
1455 <reg1> != sp
1456 effects: cfa_store.reg = <reg1>
1457 cfa_store.offset = cfa.offset - cfa_temp.offset
1458
1459 Rule 6:
1460 (set <reg> <const_int>)
1461 effects: cfa_temp.reg = <reg>
1462 cfa_temp.offset = <const_int>
1463
1464 Rule 7:
1465 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1466 effects: cfa_temp.reg = <reg1>
1467 cfa_temp.offset |= <const_int>
1468
1469 Rule 8:
1470 (set <reg> (high <exp>))
1471 effects: none
1472
1473 Rule 9:
1474 (set <reg> (lo_sum <exp> <const_int>))
1475 effects: cfa_temp.reg = <reg>
1476 cfa_temp.offset = <const_int>
1477
1478 Rule 10:
1479 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1480 effects: cfa_store.offset -= <const_int>
1481 cfa.offset = cfa_store.offset if cfa.reg == sp
1482 cfa.reg = sp
1483 cfa.base_offset = -cfa_store.offset
1484
1485 Rule 11:
1486 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1487 effects: cfa_store.offset += -/+ mode_size(mem)
1488 cfa.offset = cfa_store.offset if cfa.reg == sp
1489 cfa.reg = sp
1490 cfa.base_offset = -cfa_store.offset
1491
1492 Rule 12:
1493 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1494
1495 <reg2>)
1496 effects: cfa.reg = <reg1>
1497 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1498
1499 Rule 13:
1500 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1501 effects: cfa.reg = <reg1>
1502 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1503
1504 Rule 14:
1505 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1506 effects: cfa.reg = <reg1>
1507 cfa.base_offset = -cfa_temp.offset
1508 cfa_temp.offset -= mode_size(mem)
1509
1510 Rule 15:
1511 (set <reg> {unspec, unspec_volatile})
1512 effects: target-dependent
1513
1514 Rule 16:
1515 (set sp (and: sp <const_int>))
1516 constraints: cfa_store.reg == sp
1517 effects: cfun->fde.stack_realign = 1
1518 cfa_store.offset = 0
1519 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1520
1521 Rule 17:
1522 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1523 effects: cfa_store.offset += -/+ mode_size(mem)
1524
1525 Rule 18:
1526 (set (mem ({pre_inc, pre_dec} sp)) fp)
1527 constraints: fde->stack_realign == 1
1528 effects: cfa_store.offset = 0
1529 cfa.reg != HARD_FRAME_POINTER_REGNUM
1530
1531 Rule 19:
1532 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1533 constraints: fde->stack_realign == 1
1534 && cfa.offset == 0
1535 && cfa.indirect == 0
1536 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1537 effects: Use DW_CFA_def_cfa_expression to define cfa
1538 cfa.reg == fde->drap_reg */
1539
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
         ought to evaluate every rvalue before changing any lvalue.
         It's cumbersome to do that in general, but there's an
         easy approximation that is enough for all current users:
         handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
        for (par_index = 0; par_index < limit; par_index++)
          {
            elem = XVECEXP (expr, 0, par_index);
            if (GET_CODE (elem) == SET
                && MEM_P (SET_DEST (elem))
                && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
              dwarf2out_frame_debug_expr (elem);
          }

      for (par_index = 0; par_index < limit; par_index++)
        {
          elem = XVECEXP (expr, 0, par_index);
          if (GET_CODE (elem) == SET
              && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
              && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
            dwarf2out_frame_debug_expr (elem);
        }
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC was previously saved into another register, treat this as a
     use of the original register (see reg_saved_in).  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
        src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
        {
        /* Setting FP from SP.  */
        case REG:
          if (cur_cfa->reg == dwf_regno (src))
            {
              /* Rule 1 */
              /* Update the CFA rule wrt SP or FP.  Make sure src is
                 relative to the current CFA register.

                 We used to require that dest be either SP or FP, but the
                 ARM copies SP to a temporary register, and from there to
                 FP.  So we just rely on the backends to only set
                 RTX_FRAME_RELATED_P on appropriate insns.  */
              cur_cfa->reg = dwf_regno (dest);
              cur_trace->cfa_temp.reg = cur_cfa->reg;
              cur_trace->cfa_temp.offset = cur_cfa->offset;
            }
          else
            {
              /* Saving a register in a register.  */
              gcc_assert (!fixed_regs [REGNO (dest)]
                          /* For the SPARC and its register window.  */
                          || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

              /* After stack is aligned, we can only save SP in FP
                 if drap register is used.  In this case, we have
                 to restore stack pointer with the CFA value and we
                 don't generate this DWARF information.  */
              if (fde
                  && fde->stack_realign
                  && REGNO (src) == STACK_POINTER_REGNUM)
                gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
                            && fde->drap_reg != INVALID_REGNUM
                            && cur_cfa->reg != dwf_regno (src));
              else
                queue_reg_save (src, dest, 0);
            }
          break;

        case PLUS:
        case MINUS:
        case LO_SUM:
          if (dest == stack_pointer_rtx)
            {
              /* Rule 2 */
              /* Adjusting SP.  The adjustment is either an immediate or
                 the value previously loaded into cfa_temp (Rule 6).  */
              switch (GET_CODE (XEXP (src, 1)))
                {
                case CONST_INT:
                  offset = INTVAL (XEXP (src, 1));
                  break;
                case REG:
                  gcc_assert (dwf_regno (XEXP (src, 1))
                              == cur_trace->cfa_temp.reg);
                  offset = cur_trace->cfa_temp.offset;
                  break;
                default:
                  gcc_unreachable ();
                }

              if (XEXP (src, 0) == hard_frame_pointer_rtx)
                {
                  /* Restoring SP from FP in the epilogue.  */
                  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
                  cur_cfa->reg = dw_stack_pointer_regnum;
                }
              else if (GET_CODE (src) == LO_SUM)
                /* Assume we've set the source reg of the LO_SUM from sp.  */
                ;
              else
                gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

              /* For PLUS/LO_SUM a positive adjustment moves SP away from
                 the CFA, so negate; MINUS already encodes the direction.  */
              if (GET_CODE (src) != MINUS)
                offset = -offset;
              if (cur_cfa->reg == dw_stack_pointer_regnum)
                cur_cfa->offset += offset;
              if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
                cur_trace->cfa_store.offset += offset;
            }
          else if (dest == hard_frame_pointer_rtx)
            {
              /* Rule 3 */
              /* Either setting the FP from an offset of the SP,
                 or adjusting the FP */
              gcc_assert (frame_pointer_needed);

              gcc_assert (REG_P (XEXP (src, 0))
                          && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
                          && CONST_INT_P (XEXP (src, 1)));
              offset = INTVAL (XEXP (src, 1));
              if (GET_CODE (src) != MINUS)
                offset = -offset;
              cur_cfa->offset += offset;
              cur_cfa->reg = dw_frame_pointer_regnum;
            }
          else
            {
              gcc_assert (GET_CODE (src) != MINUS);

              /* Rule 4 */
              if (REG_P (XEXP (src, 0))
                  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
                  && CONST_INT_P (XEXP (src, 1)))
                {
                  /* Setting a temporary CFA register that will be copied
                     into the FP later on.  */
                  offset = - INTVAL (XEXP (src, 1));
                  cur_cfa->offset += offset;
                  cur_cfa->reg = dwf_regno (dest);
                  /* Or used to save regs to the stack.  */
                  cur_trace->cfa_temp.reg = cur_cfa->reg;
                  cur_trace->cfa_temp.offset = cur_cfa->offset;
                }

              /* Rule 5 */
              else if (REG_P (XEXP (src, 0))
                       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
                       && XEXP (src, 1) == stack_pointer_rtx)
                {
                  /* Setting a scratch register that we will use instead
                     of SP for saving registers to the stack.  */
                  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
                  cur_trace->cfa_store.reg = dwf_regno (dest);
                  cur_trace->cfa_store.offset
                    = cur_cfa->offset - cur_trace->cfa_temp.offset;
                }

              /* Rule 9 */
              else if (GET_CODE (src) == LO_SUM
                       && CONST_INT_P (XEXP (src, 1)))
                {
                  cur_trace->cfa_temp.reg = dwf_regno (dest);
                  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
                }
              else
                gcc_unreachable ();
            }
          break;

        /* Rule 6 */
        case CONST_INT:
          cur_trace->cfa_temp.reg = dwf_regno (dest);
          cur_trace->cfa_temp.offset = INTVAL (src);
          break;

        /* Rule 7 */
        case IOR:
          gcc_assert (REG_P (XEXP (src, 0))
                      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
                      && CONST_INT_P (XEXP (src, 1)));

          cur_trace->cfa_temp.reg = dwf_regno (dest);
          cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
          break;

        /* Skip over HIGH, assuming it will be followed by a LO_SUM,
           which will fill in all of the bits.  */
        /* Rule 8 */
        case HIGH:
          break;

        /* Rule 15 */
        case UNSPEC:
        case UNSPEC_VOLATILE:
          /* All unspecs should be represented by REG_CFA_* notes.  */
          gcc_unreachable ();
          return;

        /* Rule 16 */
        case AND:
          /* If this AND operation happens on stack pointer in prologue,
             we assume the stack is realigned and we extract the
             alignment.  */
          if (fde && XEXP (src, 0) == stack_pointer_rtx)
            {
              /* We interpret reg_save differently with stack_realign set.
                 Thus we must flush whatever we have queued first.  */
              dwarf2out_flush_queued_reg_saves ();

              gcc_assert (cur_trace->cfa_store.reg
                          == dwf_regno (XEXP (src, 0)));
              fde->stack_realign = 1;
              fde->stack_realignment = INTVAL (XEXP (src, 1));
              cur_trace->cfa_store.offset = 0;

              if (cur_cfa->reg != dw_stack_pointer_regnum
                  && cur_cfa->reg != dw_frame_pointer_regnum)
                fde->drap_reg = cur_cfa->reg;
            }
          return;

        default:
          gcc_unreachable ();
        }
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
         CFA register.  The cases below compute OFFSET, the position of
         the slot relative to the CFA, from the addressing mode used.  */
      switch (GET_CODE (XEXP (dest, 0)))
        {
        /* Rule 10 */
        /* With a push.  */
        case PRE_MODIFY:
        case POST_MODIFY:
          /* We can't handle variable size modifications.  */
          gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
                      == CONST_INT);
          offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

          gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
                      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

          cur_trace->cfa_store.offset += offset;
          if (cur_cfa->reg == dw_stack_pointer_regnum)
            cur_cfa->offset = cur_trace->cfa_store.offset;

          if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
            offset -= cur_trace->cfa_store.offset;
          else
            offset = -cur_trace->cfa_store.offset;
          break;

        /* Rule 11 */
        case PRE_INC:
        case PRE_DEC:
        case POST_DEC:
          offset = GET_MODE_SIZE (GET_MODE (dest));
          if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
            offset = -offset;

          gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
                       == STACK_POINTER_REGNUM)
                      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

          cur_trace->cfa_store.offset += offset;

          /* Rule 18: If stack is aligned, we will use FP as a
             reference to represent the address of the stored
             register.  */
          if (fde
              && fde->stack_realign
              && REG_P (src)
              && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
            {
              gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
              cur_trace->cfa_store.offset = 0;
            }

          if (cur_cfa->reg == dw_stack_pointer_regnum)
            cur_cfa->offset = cur_trace->cfa_store.offset;

          if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
            offset += -cur_trace->cfa_store.offset;
          else
            offset = -cur_trace->cfa_store.offset;
          break;

        /* Rule 12 */
        /* With an offset.  */
        case PLUS:
        case MINUS:
        case LO_SUM:
          {
            unsigned int regno;

            gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
                        && REG_P (XEXP (XEXP (dest, 0), 0)));
            offset = INTVAL (XEXP (XEXP (dest, 0), 1));
            if (GET_CODE (XEXP (dest, 0)) == MINUS)
              offset = -offset;

            regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

            /* The base must be one of the registers we track.  */
            if (cur_cfa->reg == regno)
              offset -= cur_cfa->offset;
            else if (cur_trace->cfa_store.reg == regno)
              offset -= cur_trace->cfa_store.offset;
            else
              {
                gcc_assert (cur_trace->cfa_temp.reg == regno);
                offset -= cur_trace->cfa_temp.offset;
              }
          }
          break;

        /* Rule 13 */
        /* Without an offset.  */
        case REG:
          {
            unsigned int regno = dwf_regno (XEXP (dest, 0));

            if (cur_cfa->reg == regno)
              offset = -cur_cfa->offset;
            else if (cur_trace->cfa_store.reg == regno)
              offset = -cur_trace->cfa_store.offset;
            else
              {
                gcc_assert (cur_trace->cfa_temp.reg == regno);
                offset = -cur_trace->cfa_temp.offset;
              }
          }
          break;

        /* Rule 14 */
        case POST_INC:
          gcc_assert (cur_trace->cfa_temp.reg
                      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
          offset = -cur_trace->cfa_temp.offset;
          cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
          break;

        default:
          gcc_unreachable ();
        }

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
         we only care how much stack grew.  */
      if (MEM_P (src))
        break;

      if (REG_P (src)
          && REGNO (src) != STACK_POINTER_REGNUM
          && REGNO (src) != HARD_FRAME_POINTER_REGNUM
          && dwf_regno (src) == cur_cfa->reg)
        {
          /* We're storing the current CFA reg into the stack.  */

          if (cur_cfa->offset == 0)
            {
              /* Rule 19 */
              /* If stack is aligned, putting CFA reg into stack means
                 we can no longer use reg + offset to represent CFA.
                 Here we use DW_CFA_def_cfa_expression instead.  The
                 result of this expression equals to the original CFA
                 value.  */
              if (fde
                  && fde->stack_realign
                  && cur_cfa->indirect == 0
                  && cur_cfa->reg != dw_frame_pointer_regnum)
                {
                  gcc_assert (fde->drap_reg == cur_cfa->reg);

                  cur_cfa->indirect = 1;
                  cur_cfa->reg = dw_frame_pointer_regnum;
                  cur_cfa->base_offset = offset;
                  cur_cfa->offset = 0;

                  fde->drap_reg_saved = 1;
                  break;
                }

              /* If the source register is exactly the CFA, assume
                 we're saving SP like any other register; this happens
                 on the ARM.  */
              queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
              break;
            }
          else
            {
              /* Otherwise, we'll need to look in the stack to
                 calculate the CFA.  */
              rtx x = XEXP (dest, 0);

              /* Strip the addressing mode down to its base register.  */
              if (!REG_P (x))
                x = XEXP (x, 0);
              gcc_assert (REG_P (x));

              cur_cfa->reg = dwf_regno (x);
              cur_cfa->base_offset = offset;
              cur_cfa->indirect = 1;
              break;
            }
        }

      if (REG_P (src))
        span = targetm.dwarf_register_span (src);
      else
        span = NULL;

      if (!span)
        queue_reg_save (src, NULL_RTX, offset);
      else
        {
          /* We have a PARALLEL describing where the contents of SRC live.
             Queue register saves for each piece of the PARALLEL.  */
          HOST_WIDE_INT span_offset = offset;

          gcc_assert (GET_CODE (span) == PARALLEL);

          const int par_len = XVECLEN (span, 0);
          for (int par_index = 0; par_index < par_len; par_index++)
            {
              rtx elem = XVECEXP (span, 0, par_index);
              queue_reg_save (elem, NULL_RTX, span_offset);
              span_offset += GET_MODE_SIZE (GET_MODE (elem));
            }
        }
      break;

    default:
      gcc_unreachable ();
    }
}
2008
2009 /* Record call frame debugging information for INSN, which either sets
2010 SP or FP (adjusting how we calculate the frame address) or saves a
2011 register to the stack. */
2012
static void
dwarf2out_frame_debug (rtx_insn *insn)
{
  rtx note, n, pat;
  bool handled_one = false;

  /* Walk the REG_CFA_* notes; each kind hands off to its dedicated
     subroutine.  When a note's operand is NULL the pattern of INSN
     itself (or its first PARALLEL element) stands in for it.  */
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
        /* Interpret the attached expression through the full state
           machine instead of a specific subroutine.  */
        pat = XEXP (note, 0);
        goto do_frame_expr;

      case REG_CFA_DEF_CFA:
        dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
        handled_one = true;
        break;

      case REG_CFA_ADJUST_CFA:
        n = XEXP (note, 0);
        if (n == NULL)
          {
            n = PATTERN (insn);
            if (GET_CODE (n) == PARALLEL)
              n = XVECEXP (n, 0, 0);
          }
        dwarf2out_frame_debug_adjust_cfa (n);
        handled_one = true;
        break;

      case REG_CFA_OFFSET:
        n = XEXP (note, 0);
        if (n == NULL)
          n = single_set (insn);
        dwarf2out_frame_debug_cfa_offset (n);
        handled_one = true;
        break;

      case REG_CFA_REGISTER:
        n = XEXP (note, 0);
        if (n == NULL)
          {
            n = PATTERN (insn);
            if (GET_CODE (n) == PARALLEL)
              n = XVECEXP (n, 0, 0);
          }
        dwarf2out_frame_debug_cfa_register (n);
        handled_one = true;
        break;

      case REG_CFA_EXPRESSION:
        n = XEXP (note, 0);
        if (n == NULL)
          n = single_set (insn);
        dwarf2out_frame_debug_cfa_expression (n);
        handled_one = true;
        break;

      case REG_CFA_RESTORE:
        n = XEXP (note, 0);
        if (n == NULL)
          {
            /* The restored register is the destination of the first
               (or only) SET in the pattern.  */
            n = PATTERN (insn);
            if (GET_CODE (n) == PARALLEL)
              n = XVECEXP (n, 0, 0);
            n = XEXP (n, 0);
          }
        dwarf2out_frame_debug_cfa_restore (n);
        handled_one = true;
        break;

      case REG_CFA_SET_VDRAP:
        n = XEXP (note, 0);
        if (REG_P (n))
          {
            dw_fde_ref fde = cfun->fde;
            if (fde)
              {
                /* The virtual DRAP register may only be set once.  */
                gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
                if (REG_P (n))
                  fde->vdrap_reg = dwf_regno (n);
              }
          }
        handled_one = true;
        break;

      case REG_CFA_WINDOW_SAVE:
        dwarf2out_frame_debug_cfa_window_save ();
        handled_one = true;
        break;

      case REG_CFA_FLUSH_QUEUE:
        /* The actual flush happens elsewhere.  */
        handled_one = true;
        break;

      default:
        break;
      }

  /* No REG_CFA_* note: fall back to interpreting the raw pattern.  */
  if (!handled_one)
    {
      pat = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (pat);

      /* Check again.  A parallel can save and update the same register.
         We could probably check just once, here, but this is safer than
         removing the check at the start of the function.  */
      if (clobbers_queued_reg_save (pat))
        dwarf2out_flush_queued_reg_saves ();
    }
}
2126
2127 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2128
2129 static void
2130 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2131 {
2132 size_t i, n_old, n_new, n_max;
2133 dw_cfi_ref cfi;
2134
2135 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2136 add_cfi (new_row->cfa_cfi);
2137 else
2138 {
2139 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2140 if (cfi)
2141 add_cfi (cfi);
2142 }
2143
2144 n_old = vec_safe_length (old_row->reg_save);
2145 n_new = vec_safe_length (new_row->reg_save);
2146 n_max = MAX (n_old, n_new);
2147
2148 for (i = 0; i < n_max; ++i)
2149 {
2150 dw_cfi_ref r_old = NULL, r_new = NULL;
2151
2152 if (i < n_old)
2153 r_old = (*old_row->reg_save)[i];
2154 if (i < n_new)
2155 r_new = (*new_row->reg_save)[i];
2156
2157 if (r_old == r_new)
2158 ;
2159 else if (r_new == NULL)
2160 add_cfi_restore (i);
2161 else if (!cfi_equal_p (r_old, r_new))
2162 add_cfi (r_new);
2163 }
2164 }
2165
2166 /* Examine CFI and return true if a cfi label and set_loc is needed
2167 beforehand. Even when generating CFI assembler instructions, we
2168 still have to add the cfi to the list so that lookup_cfa_1 works
2169 later on. When -g2 and above we even need to force emitting of
2170 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2171 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2172 and so don't use convert_cfa_to_fb_loc_list. */
2173
2174 static bool
2175 cfi_label_required_p (dw_cfi_ref cfi)
2176 {
2177 if (!dwarf2out_do_cfi_asm ())
2178 return true;
2179
2180 if (dwarf_version == 2
2181 && debug_info_level > DINFO_LEVEL_TERSE
2182 && (write_symbols == DWARF2_DEBUG
2183 || write_symbols == VMS_AND_DWARF2_DEBUG))
2184 {
2185 switch (cfi->dw_cfi_opc)
2186 {
2187 case DW_CFA_def_cfa_offset:
2188 case DW_CFA_def_cfa_offset_sf:
2189 case DW_CFA_def_cfa_register:
2190 case DW_CFA_def_cfa:
2191 case DW_CFA_def_cfa_sf:
2192 case DW_CFA_def_cfa_expression:
2193 case DW_CFA_restore_state:
2194 return true;
2195 default:
2196 return false;
2197 }
2198 }
2199 return false;
2200 }
2201
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;
  /* We always start with a function_begin label.  FIRST is set across a
     text-section switch, forcing DW_CFA_set_loc below instead of
     DW_CFA_advance_loc4, since label distances across sections are
     unknown.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Remember where in the FDE's CFI vector the cold section
	     begins.  */
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  /* Scan forward over the whole run of consecutive CFI notes
	     (skipping inactive insns in between) so that one label can
	     cover them all; NEXT ends at the first active insn or
	     section switch after the run.  A label is required if any
	     note in the run needs one.  */
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      /* Capture the label number before dwarf2out_cfi_label
		 advances it.  */
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      /* Emit the label itself into the insn stream so final ()
		 outputs it.  */
	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Now push every CFI in the run onto the FDE, in order.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2268
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  /* START must head a known trace; ORIGIN (may be NULL for fallthru)
     is only used for dump output.  */
  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {
      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2319
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  /* With no pending args_size there is nothing to undo; take the
     normal path.  */
  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  /* Abnormal edges land with the argument area popped: temporarily
     zero args_size and adjust the CFA accordingly, record the trace
     start with that state, then restore the in-trace state.  */
  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
2355
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled from the CALL_P branch below, at
	 the call to __builtin_setjmp-style receivers.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge to each label of the jump table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may reach any label whose address was
	     taken.  */
	  for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
	    maybe_record_trace_start (lab->insn (), insn);
	}
      else if (returnjump_p (insn))
	/* A return has no in-function destination.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* An ordinary direct jump.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* A delay-slot sequence: recurse on each member.  The recursion
	 handles any EH edges, so return rather than fall through.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2436
2437 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2438
2439 static void
2440 scan_insn_after (rtx_insn *insn)
2441 {
2442 if (RTX_FRAME_RELATED_P (insn))
2443 dwarf2out_frame_debug (insn);
2444 notice_args_size (insn);
2445 }
2446
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  /* Publish the scan state through the file-scope globals used by the
     frame-debug machinery.  CUR_CFA points at the local THIS_CFA so
     that CFA updates accumulate there until committed to the row by
     def_cfa_1.  */
  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo the target-only effects along the fallthru
		     path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the globals for the next trace.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2626
2627 /* Scan the function and create the initial set of CFI notes. */
2628
2629 static void
2630 create_cfi_notes (void)
2631 {
2632 dw_trace_info *ti;
2633
2634 gcc_checking_assert (!queued_reg_saves.exists ());
2635 gcc_checking_assert (!trace_work_list.exists ());
2636
2637 /* Always begin at the entry trace. */
2638 ti = &trace_info[0];
2639 scan_trace (ti);
2640
2641 while (!trace_work_list.is_empty ())
2642 {
2643 ti = trace_work_list.pop ();
2644 scan_trace (ti);
2645 }
2646
2647 queued_reg_saves.release ();
2648 trace_work_list.release ();
2649 }
2650
2651 /* Return the insn before the first NOTE_INSN_CFI after START. */
2652
2653 static rtx_insn *
2654 before_next_cfi_note (rtx_insn *start)
2655 {
2656 rtx_insn *prev = start;
2657 while (start)
2658 {
2659 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2660 return prev;
2661 prev = start;
2662 start = NEXT_INSN (start);
2663 }
2664 gcc_unreachable ();
2665 }
2666
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  Walking backwards
     keeps yet-unvisited indices stable across ordered_remove.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* After the restore, the effective state is the one that
		 was remembered.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2793
2794 /* Set up the pseudo-cfg of instruction traces, as described at the
2795 block comment at the top of the file. */
2796
2797 static void
2798 create_pseudo_cfg (void)
2799 {
2800 bool saw_barrier, switch_sections;
2801 dw_trace_info ti;
2802 rtx_insn *insn;
2803 unsigned i;
2804
2805 /* The first trace begins at the start of the function,
2806 and begins with the CIE row state. */
2807 trace_info.create (16);
2808 memset (&ti, 0, sizeof (ti));
2809 ti.head = get_insns ();
2810 ti.beg_row = cie_cfi_row;
2811 ti.cfa_store = cie_cfi_row->cfa;
2812 ti.cfa_temp.reg = INVALID_REGNUM;
2813 trace_info.quick_push (ti);
2814
2815 if (cie_return_save)
2816 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2817
2818 /* Walk all the insns, collecting start of trace locations. */
2819 saw_barrier = false;
2820 switch_sections = false;
2821 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2822 {
2823 if (BARRIER_P (insn))
2824 saw_barrier = true;
2825 else if (NOTE_P (insn)
2826 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2827 {
2828 /* We should have just seen a barrier. */
2829 gcc_assert (saw_barrier);
2830 switch_sections = true;
2831 }
2832 /* Watch out for save_point notes between basic blocks.
2833 In particular, a note after a barrier. Do not record these,
2834 delaying trace creation until the label. */
2835 else if (save_point_p (insn)
2836 && (LABEL_P (insn) || !saw_barrier))
2837 {
2838 memset (&ti, 0, sizeof (ti));
2839 ti.head = insn;
2840 ti.switch_sections = switch_sections;
2841 ti.id = trace_info.length ();
2842 trace_info.safe_push (ti);
2843
2844 saw_barrier = false;
2845 switch_sections = false;
2846 }
2847 }
2848
2849 /* Create the trace index after we've finished building trace_info,
2850 avoiding stale pointer problems due to reallocation. */
2851 trace_index
2852 = new hash_table<trace_info_hasher> (trace_info.length ());
2853 dw_trace_info *tp;
2854 FOR_EACH_VEC_ELT (trace_info, i, tp)
2855 {
2856 dw_trace_info **slot;
2857
2858 if (dump_file)
2859 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2860 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2861 tp->switch_sections ? " (section switch)" : "");
2862
2863 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2864 gcc_assert (*slot == NULL);
2865 *slot = tp;
2866 }
2867 }
2868
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decode the address, which must be
	 SP-relative; REG stays INVALID_REGNUM for this case.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* The RA lives in a register other than the return column; note
	 the register copy and record the save against the return
	 column.  For the MEM case (REG == INVALID_REGNUM) only the
	 offset-based save is recorded.  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2930
/* Compute the CFI state described by the CIE: the CFA at the incoming
   stack pointer (plus INCOMING_FRAME_SP_OFFSET) and the initial location
   of the return address.  Results are stored in CIE_CFI_VEC, CIE_CFI_ROW
   and (for at most one register) CIE_RETURN_SAVE.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so reg_save/record_reg_saved_in_reg have a
     CUR_TRACE to work with.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Reset the globals used above.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
2982
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work: scan each trace emitting CFI notes, stitch the traces
     together, then attach the notes to the FDE.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
3021 \f
3022 /* Convert a DWARF call frame info. operation to its string name */
3023
3024 static const char *
3025 dwarf_cfi_name (unsigned int cfi_opc)
3026 {
3027 const char *name = get_DW_CFA_name (cfi_opc);
3028
3029 if (name != NULL)
3030 return name;
3031
3032 return "DW_CFA_<unknown>";
3033 }
3034
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  /* DW_CFA_expression carries the register in operand 1 and the
     location expression in operand 2; the def_cfa_expression form keeps
     the expression in operand 1.  */
  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
3061
/* Similar, but used for .cfi_escape.  The bytes are emitted as a
   comma-separated list rather than via dw2_asm_output_data.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  /* As in output_cfa_loc: DW_CFA_expression keeps the register in
     operand 1 and the expression in operand 2.  */
  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
3088
/* Output a Call Frame Information opcode and its operand(s).  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* The three "primary" opcodes pack their operand into the low 6 bits
     of the opcode byte itself; everything else is a one-byte opcode
     followed by separately encoded operands.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  /* For EH frames the address is emitted in the configured
	     encoding; otherwise as a plain address.  Either way the
	     label becomes the new current location for later
	     advance_loc deltas.  */
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_loc variants emit the delta from the FDE's current
	   label in 1/2/4/8 bytes and then advance the current label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  /* reg (uleb128) + data-alignment-factored offset (uleb128).  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* reg (uleb128) + non-factored offset (uleb128).  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  /* reg (uleb128) + factored offset (sleb128).  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  /* Single register operand (uleb128).  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  /* Two register operands (uleb128 each).  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3229
3230 /* Similar to output_cfi, but emit the CFI via assembler directives
3231    (or, when F is not asm_out_file, as a human-readable debug dump)
3232    instead of encoding the raw DW_CFA bytes ourselves.  */
3233
3234 void
3235 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3236 {
3237 unsigned long r, r2;
3238
3239 switch (cfi->dw_cfi_opc)
3240 {
3241 case DW_CFA_advance_loc:
3242 case DW_CFA_advance_loc1:
3243 case DW_CFA_advance_loc2:
3244 case DW_CFA_advance_loc4:
3245 case DW_CFA_MIPS_advance_loc8:
3246 case DW_CFA_set_loc:
3247 /* Should only be created in a code path not followed when emitting
3248 via directives. The assembler is going to take care of this for
3249 us. But this routine is also used for debugging dumps, so
3250 print something. */
3251 gcc_assert (f != asm_out_file);
3252 fprintf (f, "\t.cfi_advance_loc\n");
3253 break;
3254
3255 case DW_CFA_offset:
3256 case DW_CFA_offset_extended:
3257 case DW_CFA_offset_extended_sf:
3258 /* Map the DWARF register to its unwind (EH) number; the assembler
3259 applies the data alignment factor itself for .cfi_offset.  */
3259 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3260 fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
3261 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3262 break;
3263
3264 case DW_CFA_restore:
3265 case DW_CFA_restore_extended:
3266 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3267 fprintf (f, "\t.cfi_restore %lu\n", r);
3268 break;
3269
3270 case DW_CFA_undefined:
3271 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3272 fprintf (f, "\t.cfi_undefined %lu\n", r);
3273 break;
3274
3275 case DW_CFA_same_value:
3276 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3277 fprintf (f, "\t.cfi_same_value %lu\n", r);
3278 break;
3279
3280 case DW_CFA_def_cfa:
3281 case DW_CFA_def_cfa_sf:
3282 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3283 fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
3284 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3285 break;
3286
3287 case DW_CFA_def_cfa_register:
3288 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3289 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3290 break;
3291
3292 case DW_CFA_register:
3293 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3294 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3295 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3296 break;
3297
3298 case DW_CFA_def_cfa_offset:
3299 case DW_CFA_def_cfa_offset_sf:
3300 fprintf (f, "\t.cfi_def_cfa_offset "
3301 HOST_WIDE_INT_PRINT_DEC"\n",
3302 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3303 break;
3304
3305 case DW_CFA_remember_state:
3306 fprintf (f, "\t.cfi_remember_state\n");
3307 break;
3308 case DW_CFA_restore_state:
3309 fprintf (f, "\t.cfi_restore_state\n");
3310 break;
3311
3312 case DW_CFA_GNU_args_size:
3313 /* There is no dedicated assembler directive for this opcode, so
3314 when emitting real assembly escape the raw DWARF bytes via
3315 .cfi_escape; the debug dump path prints a synthetic name.  */
3316 if (f == asm_out_file)
3317 {
3318 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3319 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3320 if (flag_debug_asm)
3321 fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
3322 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3323 fputc ('\n', f);
3324 }
3325 else
3326 {
3327 fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
3328 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3329 }
3330 break;
3331
3332 case DW_CFA_GNU_window_save:
3333 fprintf (f, "\t.cfi_window_save\n");
3334 break;
3335
3336 case DW_CFA_def_cfa_expression:
3337 /* The debug dump cannot render the location expression, so just
3338 print a placeholder; real assembly falls through to the shared
3339 .cfi_escape emission below.  */
3340 if (f != asm_out_file)
3341 {
3342 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3343 break;
3344 }
3345 /* FALLTHRU */
3346 case DW_CFA_expression:
3347 if (f != asm_out_file)
3348 {
3349 fprintf (f, "\t.cfi_cfa_expression ...\n");
3350 break;
3351 }
3352 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3353 output_cfa_loc_raw (cfi);
3354 fputc ('\n', f);
3355 break;
3356
3357 default:
3358 gcc_unreachable ();
3359 }
3360 }
3352
3353 /* Emit the given CFI instruction for the current location, provided
3354    frame unwind is being produced via assembler directives; otherwise
3355    this is a no-op (the frame tables are emitted by other paths).  */
3356
3357 void
3358 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3359 {
3360 if (!dwarf2out_do_cfi_asm ())
3361 return;
3362 output_cfi_directive (asm_out_file, cfi);
3363 }
3359
3360 /* Dump the complete unwind state described by ROW to F, one CFI
3361    directive per line: first the CFA definition, then each saved
3362    register.  Used for debugging dumps only.  */
3363
3364 static void
3365 dump_cfi_row (FILE *f, dw_cfi_row *row)
3366 {
3367 /* If the row carries no pre-built CFA instruction, synthesize one by
3368 diffing against an invalid "previous" CFA, which forces def_cfa_0
3369 to describe row->cfa in full.  */
3370 dw_cfi_ref cfa_insn = row->cfa_cfi;
3371 if (cfa_insn == NULL)
3372 {
3373 dw_cfa_location invalid_cfa;
3374 memset (&invalid_cfa, 0, sizeof (invalid_cfa));
3375 invalid_cfa.reg = INVALID_REGNUM;
3376 cfa_insn = def_cfa_0 (&invalid_cfa, &row->cfa);
3377 }
3378 output_cfi_directive (f, cfa_insn);
3379
3380 /* Now each register-save entry; the vector may be sparse.  */
3381 unsigned ix;
3382 dw_cfi_ref save;
3383 FOR_EACH_VEC_SAFE_ELT (row->reg_save, ix, save)
3384 if (save != NULL)
3385 output_cfi_directive (f, save);
3386 }
3380
3381 void debug_cfi_row (dw_cfi_row *row);
3382
3383 /* Entry point intended to be called from a debugger: dump ROW to
3384    stderr.  The forward declaration above keeps -Wmissing-declarations
3385    quiet for this otherwise-unreferenced external function.  */
3386
3387 void
3388 debug_cfi_row (dw_cfi_row *row)
3389 {
3390 dump_cfi_row (stderr, row);
3391 }
3388 \f
3389
3390 /* Save the result of dwarf2out_do_cfi_asm across PCH (note: cached for
3391 dwarf2out_do_cfi_asm; dwarf2out_do_frame only reads the >0 state).
3392 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3393 static GTY(()) signed char saved_do_cfi_asm = 0;
3393
3394 /* Decide whether we want to emit frame unwind information for the current
3395 translation unit.  Returns true if any consumer (debug info, cached
3396 CFI-asm decision, target unwind info, or exception tables) needs it.  */
3397
3398 bool
3399 dwarf2out_do_frame (void)
3400 {
3401 /* We want to emit correct CFA location expressions or lists, so we
3402 have to return true if we're going to output debug info, even if
3403 we're not going to output frame or unwind info.  */
3404 bool debug_needs_frame = (write_symbols == DWARF2_DEBUG
3405 || write_symbols == VMS_AND_DWARF2_DEBUG);
3406
3407 /* The || chain preserves the original check order; each clause is a
3408 side-effect-free query.  */
3409 return (debug_needs_frame
3410 || saved_do_cfi_asm > 0
3411 || targetm.debug_unwind_info () == UI_DWARF2
3412 || ((flag_unwind_tables || flag_exceptions)
3413 && targetm_common.except_unwind_info (&global_options)
3414 == UI_DWARF2));
3415 }
3418
3419 /* Decide whether to emit frame unwind via assembler directives
3420 (.cfi_*).  The answer is computed once and memoized in
3421 saved_do_cfi_asm; subsequent calls return the cached result.  */
3422
3423 bool
3424 dwarf2out_do_cfi_asm (void)
3425 {
3426 int enc;
3427
3428 /* Return the memoized answer if we have one (nonzero means decided).  */
3429 if (saved_do_cfi_asm != 0)
3430 return saved_do_cfi_asm > 0;
3431
3432 /* Assume failure for a moment.  Every early return below then leaves
3433 the cache holding "false"; only the success path at the end
3434 overwrites it.  */
3435 saved_do_cfi_asm = -1;
3436
3437 /* User disabled CFI asm, or no frame info is wanted at all.  */
3438 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3439 return false;
3440 /* The configured assembler lacks .cfi_personality support.  */
3441 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3442 return false;
3443
3444 /* Make sure the personality encoding is one the assembler can support.
3445 In particular, aligned addresses can't be handled.  */
3446 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3447 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3448 return false;
3449 /* Likewise for the LSDA encoding.  */
3450 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3451 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3452 return false;
3453
3454 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3455 dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
3456 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3457 && !flag_unwind_tables && !flag_exceptions
3458 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3459 return false;
3460
3461 /* Success!  */
3462 saved_do_cfi_asm = 1;
3463 return true;
3464 }
3457
3458 namespace {
3459
3460 /* Pass metadata for the dwarf2 frame pass, run during final ("dwarf2"
3461 under TV_FINAL timing); it requires, provides, and destroys no IR
3462 properties.  */
3463 const pass_data pass_data_dwarf2_frame =
3464 {
3465 RTL_PASS, /* type */
3466 "dwarf2", /* name */
3467 OPTGROUP_NONE, /* optinfo_flags */
3468 TV_FINAL, /* tv_id */
3469 0, /* properties_required */
3470 0, /* properties_provided */
3471 0, /* properties_destroyed */
3472 0, /* todo_flags_start */
3473 0, /* todo_flags_finish */
3474 };
3475
3476 /* RTL pass wrapper: its execute method delegates to
3477 execute_dwarf2_frame; gating is defined out of line below.  */
3478 class pass_dwarf2_frame : public rtl_opt_pass
3479 {
3479 public:
3480 pass_dwarf2_frame (gcc::context *ctxt)
3481 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3482 {}
3483
3484 /* opt_pass methods: */
3485 virtual bool gate (function *);
3486 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3487
3488 }; // class pass_dwarf2_frame
3489
3490 /* Gate: run the pass only when generic dwarf2 CFI notes are wanted.  */
3491 bool
3492 pass_dwarf2_frame::gate (function *)
3493 {
3494 #ifndef HAVE_prologue
3495 /* Targets which still implement the prologue in assembler text
3496 cannot use the generic dwarf2 unwinding.  (When HAVE_prologue is
3497 undefined this makes the check below unreachable.)  */
3498 return false;
3499 #endif
3500
3501 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3502 from the optimized shrink-wrapping annotations that we will compute.
3503 For now, only produce the CFI notes for dwarf2.  */
3504 return dwarf2out_do_frame ();
3505 }
3506
3507 } // anon namespace
3502
3503 /* Factory for the dwarf2 frame pass; the caller (the pass manager)
3504 takes ownership of the returned object.  */
3505
3506 rtl_opt_pass *
3507 make_pass_dwarf2_frame (gcc::context *ctxt)
3508 {
3509 return new pass_dwarf2_frame (ctxt);
3510 }
3508
3509 #include "gt-dwarf2cfi.h"