]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/dwarf2cfi.c
tree-core.h: Include symtab.h.
[thirdparty/gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "stor-layout.h"
30 #include "function.h"
31 #include "cfgbuild.h"
32 #include "dwarf2.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "tm_p.h"
36 #include "target.h"
37 #include "common/common-target.h"
38 #include "tree-pass.h"
39
40 #include "except.h" /* expand_builtin_dwarf_sp_column */
41 #include "insn-config.h"
42 #include "expmed.h"
43 #include "dojump.h"
44 #include "explow.h"
45 #include "calls.h"
46 #include "emit-rtl.h"
47 #include "varasm.h"
48 #include "stmt.h"
49 #include "expr.h" /* init_return_column_size */
50 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
51 #include "output.h" /* asm_out_file */
52 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
53
54
55 /* ??? Poison these here until it can be done generically. They've been
56 totally replaced in this file; make sure it stays that way. */
57 #undef DWARF2_UNWIND_INFO
58 #undef DWARF2_FRAME_INFO
59 #if (GCC_VERSION >= 3000)
60 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
61 #endif
62
63 #ifndef INCOMING_RETURN_ADDR_RTX
64 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
65 #endif
66
67 /* Maximum size (in bytes) of an artificially generated label. */
68 #define MAX_ARTIFICIAL_LABEL_BYTES 30
69 \f
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column; a null element means the column is unsaved.  */
  cfi_vec reg_save;
} dw_cfi_row;
82
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  Used to track register
   saves that go to another register rather than to a stack slot.  */
typedef struct GTY(()) reg_saved_in_data_struct {
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;
88
89
90 /* Since we no longer have a proper CFG, we're going to create a facsimile
91 of one on the fly while processing the frame-related insns.
92
93 We create dw_trace_info structures for each extended basic block beginning
94 and ending at a "save point". Save points are labels, barriers, certain
95 notes, and of course the beginning and end of the function.
96
97 As we encounter control transfer insns, we propagate the "current"
98 row state across the edges to the starts of traces. When checking is
99 enabled, we validate that we propagate the same data from all sources.
100
101 All traces are members of the TRACE_INFO array, in the order in which
102 they appear in the instruction stream.
103
104 All save points are present in the TRACE_INDEX hash, mapping the insn
105 starting a trace to the dw_trace_info describing the trace. */
106
/* One extended basic block of the pseudo-CFG described above, running
   from one save point to the next.  */
typedef struct
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     members are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;
161
162
163 typedef dw_trace_info *dw_trace_info_ref;
164
165
/* Hashtable helpers for TRACE_INDEX, which maps a trace's starting insn
   to its dw_trace_info.  */

struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

/* A trace hashes by the UID of its head insn, so get_trace_info can
   look a trace up directly from that insn.  */

inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

/* Two traces are the same iff they start at the same insn.  */

inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
185
186
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info_ref> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* A reg_saved_in_data record hoisted to the CIE level; presumably the
   return-address column's save — confirm against the CIE setup code,
   which is not visible in this chunk.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Source of unique numbers for the LCFI labels generated by
   dwarf2out_cfi_label.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

typedef struct {
  /* The register being saved; may be PC_RTX for the return column.  */
  rtx reg;
  /* The register it is saved into, or NULL when saved to the CFA slot.  */
  rtx saved_reg;
  /* Offset from the CFA of the save slot, used when saved_reg is NULL.  */
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
238 \f
239 /* Hook used by __throw. */
240
241 rtx
242 expand_builtin_dwarf_sp_column (void)
243 {
244 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
245 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
246 }
247
248 /* MEM is a memory reference for the register size table, each element of
249 which has mode MODE. Initialize column C as a return address column. */
250
251 static void
252 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
253 {
254 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
255 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
256 emit_move_insn (adjust_address (mem, mode, offset),
257 gen_int_mode (size, mode));
258 }
259
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

typedef struct
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  Prevents double-processing of
     registers that appear both in a span and on their own.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

} init_one_dwarf_reg_state;
274
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  /* Map the hard regno through the target's DWARF numbering, then to the
     unwinder's column for that DWARF number.  */
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO processed even if we bail out below, so the caller never
     hands it to us a second time.  */
  init_state->processed_regno[regno] = true;

  /* Columns past the end of the unwinder's table have no slot to fill.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      if (regmode == VOIDmode)
	return;
      /* Tell the caller not to emit the default return-column size.  */
      init_state->wrote_return_column = true;
    }

  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
311
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  /* Access the table as an opaque blob; init_one_dwarf_reg_size adjusts
     this MEM to the individual slots.  */
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  /* The target splits this register over several DWARF columns;
	     initialize the slot for each piece.  */
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* If no register mapped onto the return column, give it the default
     pointer-sized entry.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
365
366 \f
367 static dw_trace_info *
368 get_trace_info (rtx_insn *insn)
369 {
370 dw_trace_info dummy;
371 dummy.head = insn;
372 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
373 }
374
375 static bool
376 save_point_p (rtx_insn *insn)
377 {
378 /* Labels, except those that are really jump tables. */
379 if (LABEL_P (insn))
380 return inside_basic_block_p (insn);
381
382 /* We split traces at the prologue/epilogue notes because those
383 are points at which the unwind info is usually stable. This
384 makes it easier to find spots with identical unwind info so
385 that we can use remember/restore_state opcodes. */
386 if (NOTE_P (insn))
387 switch (NOTE_KIND (insn))
388 {
389 case NOTE_INSN_PROLOGUE_END:
390 case NOTE_INSN_EPILOGUE_BEG:
391 return true;
392 }
393
394 return false;
395 }
396
397 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
398
399 static inline HOST_WIDE_INT
400 div_data_align (HOST_WIDE_INT off)
401 {
402 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
403 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
404 return r;
405 }
406
407 /* Return true if we need a signed version of a given opcode
408 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
409
410 static inline bool
411 need_data_align_sf_opcode (HOST_WIDE_INT off)
412 {
413 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
414 }
415
/* Return a pointer to a newly allocated Call Frame Instruction.  Both
   operands are zeroed; the opcode is left for the caller to set.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
428
/* Return a newly allocated CFI row, with no defined data: the storage is
   zeroed and the CFA register is marked invalid.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  row->cfa.reg = INVALID_REGNUM;

  return row;
}
440
/* Return a copy of an existing CFI row.  The reg_save vector is deep
   copied so the two rows can be updated independently afterwards.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  *dst = *src;
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}
453
454 /* Generate a new label for the CFI info to refer to. */
455
456 static char *
457 dwarf2out_cfi_label (void)
458 {
459 int num = dwarf2out_cfi_label_num++;
460 char label[20];
461
462 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
463
464 return xstrdup (label);
465 }
466
/* Add CFI either to the current insn stream or to a vector, or both,
   depending on which of ADD_CFI_INSN / ADD_CFI_VEC is currently set.
   Also records that something was emitted at the current insn.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      /* Advance the insertion point so consecutive CFIs stay in order.  */
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
483
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE bytes of outgoing
   argument area.  */

static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}
498
499 static void
500 add_cfi_restore (unsigned reg)
501 {
502 dw_cfi_ref cfi = new_cfi ();
503
504 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
505 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
506
507 add_cfi (cfi);
508 }
509
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  The vector is grown
   (zero-filled) on demand to make COLUMN addressable.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
520
521 /* This function fills in aa dw_cfa_location structure from a dwarf location
522 descriptor sequence. */
523
524 static void
525 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
526 {
527 struct dw_loc_descr_node *ptr;
528 cfa->offset = 0;
529 cfa->base_offset = 0;
530 cfa->indirect = 0;
531 cfa->reg = -1;
532
533 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
534 {
535 enum dwarf_location_atom op = ptr->dw_loc_opc;
536
537 switch (op)
538 {
539 case DW_OP_reg0:
540 case DW_OP_reg1:
541 case DW_OP_reg2:
542 case DW_OP_reg3:
543 case DW_OP_reg4:
544 case DW_OP_reg5:
545 case DW_OP_reg6:
546 case DW_OP_reg7:
547 case DW_OP_reg8:
548 case DW_OP_reg9:
549 case DW_OP_reg10:
550 case DW_OP_reg11:
551 case DW_OP_reg12:
552 case DW_OP_reg13:
553 case DW_OP_reg14:
554 case DW_OP_reg15:
555 case DW_OP_reg16:
556 case DW_OP_reg17:
557 case DW_OP_reg18:
558 case DW_OP_reg19:
559 case DW_OP_reg20:
560 case DW_OP_reg21:
561 case DW_OP_reg22:
562 case DW_OP_reg23:
563 case DW_OP_reg24:
564 case DW_OP_reg25:
565 case DW_OP_reg26:
566 case DW_OP_reg27:
567 case DW_OP_reg28:
568 case DW_OP_reg29:
569 case DW_OP_reg30:
570 case DW_OP_reg31:
571 cfa->reg = op - DW_OP_reg0;
572 break;
573 case DW_OP_regx:
574 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
575 break;
576 case DW_OP_breg0:
577 case DW_OP_breg1:
578 case DW_OP_breg2:
579 case DW_OP_breg3:
580 case DW_OP_breg4:
581 case DW_OP_breg5:
582 case DW_OP_breg6:
583 case DW_OP_breg7:
584 case DW_OP_breg8:
585 case DW_OP_breg9:
586 case DW_OP_breg10:
587 case DW_OP_breg11:
588 case DW_OP_breg12:
589 case DW_OP_breg13:
590 case DW_OP_breg14:
591 case DW_OP_breg15:
592 case DW_OP_breg16:
593 case DW_OP_breg17:
594 case DW_OP_breg18:
595 case DW_OP_breg19:
596 case DW_OP_breg20:
597 case DW_OP_breg21:
598 case DW_OP_breg22:
599 case DW_OP_breg23:
600 case DW_OP_breg24:
601 case DW_OP_breg25:
602 case DW_OP_breg26:
603 case DW_OP_breg27:
604 case DW_OP_breg28:
605 case DW_OP_breg29:
606 case DW_OP_breg30:
607 case DW_OP_breg31:
608 cfa->reg = op - DW_OP_breg0;
609 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
610 break;
611 case DW_OP_bregx:
612 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
613 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
614 break;
615 case DW_OP_deref:
616 cfa->indirect = 1;
617 break;
618 case DW_OP_plus_uconst:
619 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
620 break;
621 default:
622 gcc_unreachable ();
623 }
624 }
625 }
626
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  Opcodes that do not
   affect the CFA are ignored.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the location expression back into register/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only a single level of remember/restore is supported here; the
	 assert enforces that no second remember arrives before restore.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}
667
668 /* Determine if two dw_cfa_location structures define the same data. */
669
670 bool
671 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
672 {
673 return (loc1->reg == loc2->reg
674 && loc1->offset == loc2->offset
675 && loc1->indirect == loc2->indirect
676 && (loc1->indirect == 0
677 || loc1->base_offset == loc2->base_offset));
678 }
679
/* Determine if two CFI operands A and B are identical, interpreting them
   according to the operand type T.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Same pointer, or same label text.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
701
/* Determine if two CFI entries are identical.  Either argument may be
   NULL, which compares equal only to another NULL.  */

static bool
cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
{
  enum dwarf_call_frame_info opc;

  /* Make things easier for our callers, including missing operands.  */
  if (a == b)
    return true;
  if (a == NULL || b == NULL)
    return false;

  /* Obviously, the opcodes must match.  */
  opc = a->dw_cfi_opc;
  if (opc != b->dw_cfi_opc)
    return false;

  /* Compare the two operands, re-using the type of the operands as
     already exposed elsewhere.  */
  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}
727
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  /* NOTE(review): when A has no cfa_cfi but B does, this falls through to
     comparing the decoded locations only — presumably rows that need an
     expression always have cfa_cfi set; confirm at the call sites.  */
  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  /* Compare reg_save vectors element-wise; a missing trailing element
     counts as NULL, so differing lengths can still compare equal.  */
  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
762
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  The branches below are ordered from the
   most compact encoding to the most general one.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
829
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the trace's notion of the register-save base in sync with a
     direct redefinition of the CFA off the same register.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Remember the expression form, if that is what we emitted, so
	 cfi_row_equal_p can compare it directly.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
850
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  Updates CUR_ROW's reg_save to match.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      /* Redundant with the assignment above, but harmless.  */
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Pick the most compact save-at-offset encoding: signed extended
	 form when the factored offset is negative, extended form when
	 the column does not fit in the opcode's six bits.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
901
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
936
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn of the trace: record it and start the delayed
	 args_size tracking from the current true size.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
963
/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  /* Pseudos have no DWARF column; only hard registers are meaningful.  */
  gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
  return DWARF_FRAME_REGNUM (REGNO (reg));
}
974
975 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
976
977 static bool
978 compare_reg_or_pc (rtx x, rtx y)
979 {
980 if (REG_P (x) && REG_P (y))
981 return REGNO (x) == REGNO (y);
982 return x == y;
983 }
984
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* Update in place (or delete) an existing entry for SRC.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* A delete request for an absent entry is a no-op.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
1010
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.
   A second save of the same REG overwrites the earlier queue entry.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
	*q = e;
	return;
      }

  queued_reg_saves.safe_push (e);
}
1032
/* Output all the entries in QUEUED_REG_SAVES, emitting one reg_save CFI
   per entry and recording any register-to-register saves, then empty
   the queue.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
1060
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      /* The queued register itself is modified by INSN.  */
      if (modified_in_p (q->reg, insn))
	return true;

      /* INSN modifies the register that was holding Q->reg's value.  */
      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
1088
1089 /* What register, if any, is currently saved in REG? */
1090
1091 static rtx
1092 reg_saved_in (rtx reg)
1093 {
1094 unsigned int regn = REGNO (reg);
1095 queued_reg_save *q;
1096 reg_saved_in_data *rir;
1097 size_t i;
1098
1099 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1100 if (q->saved_reg && regn == REGNO (q->saved_reg))
1101 return q->reg;
1102
1103 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1104 if (regn == REGNO (rir->saved_in_reg))
1105 return rir->orig_reg;
1106
1107 return NULL_RTX;
1108 }
1109
1110 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1111
1112 static void
1113 dwarf2out_frame_debug_def_cfa (rtx pat)
1114 {
1115 memset (cur_cfa, 0, sizeof (*cur_cfa));
1116
1117 if (GET_CODE (pat) == PLUS)
1118 {
1119 cur_cfa->offset = INTVAL (XEXP (pat, 1));
1120 pat = XEXP (pat, 0);
1121 }
1122 if (MEM_P (pat))
1123 {
1124 cur_cfa->indirect = 1;
1125 pat = XEXP (pat, 0);
1126 if (GET_CODE (pat) == PLUS)
1127 {
1128 cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1129 pat = XEXP (pat, 0);
1130 }
1131 }
1132 /* ??? If this fails, we could be calling into the _loc functions to
1133 define a full expression. So far no port does that. */
1134 gcc_assert (REG_P (pat));
1135 cur_cfa->reg = dwf_regno (pat);
1136 }
1137
1138 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1139
1140 static void
1141 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1142 {
1143 rtx src, dest;
1144
1145 gcc_assert (GET_CODE (pat) == SET);
1146 dest = XEXP (pat, 0);
1147 src = XEXP (pat, 1);
1148
1149 switch (GET_CODE (src))
1150 {
1151 case PLUS:
1152 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1153 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1154 break;
1155
1156 case REG:
1157 break;
1158
1159 default:
1160 gcc_unreachable ();
1161 }
1162
1163 cur_cfa->reg = dwf_regno (dest);
1164 gcc_assert (cur_cfa->indirect == 0);
1165 }
1166
1167 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1168
1169 static void
1170 dwarf2out_frame_debug_cfa_offset (rtx set)
1171 {
1172 HOST_WIDE_INT offset;
1173 rtx src, addr, span;
1174 unsigned int sregno;
1175
1176 src = XEXP (set, 1);
1177 addr = XEXP (set, 0);
1178 gcc_assert (MEM_P (addr));
1179 addr = XEXP (addr, 0);
1180
1181 /* As documented, only consider extremely simple addresses. */
1182 switch (GET_CODE (addr))
1183 {
1184 case REG:
1185 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1186 offset = -cur_cfa->offset;
1187 break;
1188 case PLUS:
1189 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1190 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1191 break;
1192 default:
1193 gcc_unreachable ();
1194 }
1195
1196 if (src == pc_rtx)
1197 {
1198 span = NULL;
1199 sregno = DWARF_FRAME_RETURN_COLUMN;
1200 }
1201 else
1202 {
1203 span = targetm.dwarf_register_span (src);
1204 sregno = dwf_regno (src);
1205 }
1206
1207 /* ??? We'd like to use queue_reg_save, but we need to come up with
1208 a different flushing heuristic for epilogues. */
1209 if (!span)
1210 reg_save (sregno, INVALID_REGNUM, offset);
1211 else
1212 {
1213 /* We have a PARALLEL describing where the contents of SRC live.
1214 Adjust the offset for each piece of the PARALLEL. */
1215 HOST_WIDE_INT span_offset = offset;
1216
1217 gcc_assert (GET_CODE (span) == PARALLEL);
1218
1219 const int par_len = XVECLEN (span, 0);
1220 for (int par_index = 0; par_index < par_len; par_index++)
1221 {
1222 rtx elem = XVECEXP (span, 0, par_index);
1223 sregno = dwf_regno (src);
1224 reg_save (sregno, INVALID_REGNUM, span_offset);
1225 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1226 }
1227 }
1228 }
1229
1230 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1231
1232 static void
1233 dwarf2out_frame_debug_cfa_register (rtx set)
1234 {
1235 rtx src, dest;
1236 unsigned sregno, dregno;
1237
1238 src = XEXP (set, 1);
1239 dest = XEXP (set, 0);
1240
1241 record_reg_saved_in_reg (dest, src);
1242 if (src == pc_rtx)
1243 sregno = DWARF_FRAME_RETURN_COLUMN;
1244 else
1245 sregno = dwf_regno (src);
1246
1247 dregno = dwf_regno (dest);
1248
1249 /* ??? We'd like to use queue_reg_save, but we need to come up with
1250 a different flushing heuristic for epilogues. */
1251 reg_save (sregno, dregno, 0);
1252 }
1253
1254 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1255
1256 static void
1257 dwarf2out_frame_debug_cfa_expression (rtx set)
1258 {
1259 rtx src, dest, span;
1260 dw_cfi_ref cfi = new_cfi ();
1261 unsigned regno;
1262
1263 dest = SET_DEST (set);
1264 src = SET_SRC (set);
1265
1266 gcc_assert (REG_P (src));
1267 gcc_assert (MEM_P (dest));
1268
1269 span = targetm.dwarf_register_span (src);
1270 gcc_assert (!span);
1271
1272 regno = dwf_regno (src);
1273
1274 cfi->dw_cfi_opc = DW_CFA_expression;
1275 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1276 cfi->dw_cfi_oprnd2.dw_cfi_loc
1277 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1278 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1279
1280 /* ??? We'd like to use queue_reg_save, were the interface different,
1281 and, as above, we could manage flushing for epilogues. */
1282 add_cfi (cfi);
1283 update_row_reg_save (cur_row, regno, cfi);
1284 }
1285
1286 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1287
1288 static void
1289 dwarf2out_frame_debug_cfa_restore (rtx reg)
1290 {
1291 gcc_assert (REG_P (reg));
1292
1293 rtx span = targetm.dwarf_register_span (reg);
1294 if (!span)
1295 {
1296 unsigned int regno = dwf_regno (reg);
1297 add_cfi_restore (regno);
1298 update_row_reg_save (cur_row, regno, NULL);
1299 }
1300 else
1301 {
1302 /* We have a PARALLEL describing where the contents of REG live.
1303 Restore the register for each piece of the PARALLEL. */
1304 gcc_assert (GET_CODE (span) == PARALLEL);
1305
1306 const int par_len = XVECLEN (span, 0);
1307 for (int par_index = 0; par_index < par_len; par_index++)
1308 {
1309 reg = XVECEXP (span, 0, par_index);
1310 gcc_assert (REG_P (reg));
1311 unsigned int regno = dwf_regno (reg);
1312 add_cfi_restore (regno);
1313 update_row_reg_save (cur_row, regno, NULL);
1314 }
1315 }
1316 }
1317
1318 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1319 ??? Perhaps we should note in the CIE where windows are saved (instead of
1320 assuming 0(cfa)) and what registers are in the window. */
1321
1322 static void
1323 dwarf2out_frame_debug_cfa_window_save (void)
1324 {
1325 dw_cfi_ref cfi = new_cfi ();
1326
1327 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1328 add_cfi (cfi);
1329 }
1330
1331 /* Record call frame debugging information for an expression EXPR,
1332 which either sets SP or FP (adjusting how we calculate the frame
1333 address) or saves a register to the stack or another register.
1334 LABEL indicates the address of EXPR.
1335
1336 This function encodes a state machine mapping rtxes to actions on
1337 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1338 users need not read the source code.
1339
1340 The High-Level Picture
1341
1342 Changes in the register we use to calculate the CFA: Currently we
1343 assume that if you copy the CFA register into another register, we
1344 should take the other one as the new CFA register; this seems to
1345 work pretty well. If it's wrong for some target, it's simple
1346 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1347
1348 Changes in the register we use for saving registers to the stack:
1349 This is usually SP, but not always. Again, we deduce that if you
1350 copy SP into another register (and SP is not the CFA register),
1351 then the new register is the one we will be using for register
1352 saves. This also seems to work.
1353
1354 Register saves: There's not much guesswork about this one; if
1355 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1356 register save, and the register used to calculate the destination
1357 had better be the one we think we're using for this purpose.
1358 It's also assumed that a copy from a call-saved register to another
1359 register is saving that register if RTX_FRAME_RELATED_P is set on
1360 that instruction. If the copy is from a call-saved register to
1361 the *same* register, that means that the register is now the same
1362 value as in the caller.
1363
1364 Except: If the register being saved is the CFA register, and the
1365 offset is nonzero, we are saving the CFA, so we assume we have to
1366 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1367 the intent is to save the value of SP from the previous frame.
1368
   In addition, if a register has previously been saved to a different
   register, subsequent uses of it are described in terms of the register
   it was saved in (see reg_saved_in below).
1371
1372 Invariants / Summaries of Rules
1373
1374 cfa current rule for calculating the CFA. It usually
1375 consists of a register and an offset. This is
1376 actually stored in *cur_cfa, but abbreviated
1377 for the purposes of this documentation.
1378 cfa_store register used by prologue code to save things to the stack
1379 cfa_store.offset is the offset from the value of
1380 cfa_store.reg to the actual CFA
1381 cfa_temp register holding an integral value. cfa_temp.offset
1382 stores the value, which will be used to adjust the
1383 stack pointer. cfa_temp is also used like cfa_store,
1384 to track stores to the stack via fp or a temp reg.
1385
1386 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1387 with cfa.reg as the first operand changes the cfa.reg and its
1388 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1389 cfa_temp.offset.
1390
1391 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1392 expression yielding a constant. This sets cfa_temp.reg
1393 and cfa_temp.offset.
1394
1395 Rule 5: Create a new register cfa_store used to save items to the
1396 stack.
1397
1398 Rules 10-14: Save a register to the stack. Define offset as the
1399 difference of the original location and cfa_store's
1400 location (or cfa_temp's location if cfa_temp is used).
1401
1402 Rules 16-20: If AND operation happens on sp in prologue, we assume
1403 stack is realigned. We will use a group of DW_OP_XXX
1404 expressions to represent the location of the stored
1405 register instead of CFA+offset.
1406
1407 The Rules
1408
1409 "{a,b}" indicates a choice of a xor b.
1410 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1411
1412 Rule 1:
1413 (set <reg1> <reg2>:cfa.reg)
1414 effects: cfa.reg = <reg1>
1415 cfa.offset unchanged
1416 cfa_temp.reg = <reg1>
1417 cfa_temp.offset = cfa.offset
1418
1419 Rule 2:
1420 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1421 {<const_int>,<reg>:cfa_temp.reg}))
1422 effects: cfa.reg = sp if fp used
1423 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1424 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1425 if cfa_store.reg==sp
1426
1427 Rule 3:
1428 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1429 effects: cfa.reg = fp
1430 cfa_offset += +/- <const_int>
1431
1432 Rule 4:
1433 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1434 constraints: <reg1> != fp
1435 <reg1> != sp
1436 effects: cfa.reg = <reg1>
1437 cfa_temp.reg = <reg1>
1438 cfa_temp.offset = cfa.offset
1439
1440 Rule 5:
1441 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1442 constraints: <reg1> != fp
1443 <reg1> != sp
1444 effects: cfa_store.reg = <reg1>
1445 cfa_store.offset = cfa.offset - cfa_temp.offset
1446
1447 Rule 6:
1448 (set <reg> <const_int>)
1449 effects: cfa_temp.reg = <reg>
1450 cfa_temp.offset = <const_int>
1451
1452 Rule 7:
1453 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1454 effects: cfa_temp.reg = <reg1>
1455 cfa_temp.offset |= <const_int>
1456
1457 Rule 8:
1458 (set <reg> (high <exp>))
1459 effects: none
1460
1461 Rule 9:
1462 (set <reg> (lo_sum <exp> <const_int>))
1463 effects: cfa_temp.reg = <reg>
1464 cfa_temp.offset = <const_int>
1465
1466 Rule 10:
1467 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1468 effects: cfa_store.offset -= <const_int>
1469 cfa.offset = cfa_store.offset if cfa.reg == sp
1470 cfa.reg = sp
1471 cfa.base_offset = -cfa_store.offset
1472
1473 Rule 11:
1474 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1475 effects: cfa_store.offset += -/+ mode_size(mem)
1476 cfa.offset = cfa_store.offset if cfa.reg == sp
1477 cfa.reg = sp
1478 cfa.base_offset = -cfa_store.offset
1479
1480 Rule 12:
1481 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1482
1483 <reg2>)
1484 effects: cfa.reg = <reg1>
1485 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1486
1487 Rule 13:
1488 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1489 effects: cfa.reg = <reg1>
1490 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1491
1492 Rule 14:
1493 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1494 effects: cfa.reg = <reg1>
1495 cfa.base_offset = -cfa_temp.offset
1496 cfa_temp.offset -= mode_size(mem)
1497
1498 Rule 15:
1499 (set <reg> {unspec, unspec_volatile})
1500 effects: target-dependent
1501
1502 Rule 16:
1503 (set sp (and: sp <const_int>))
1504 constraints: cfa_store.reg == sp
1505 effects: cfun->fde.stack_realign = 1
1506 cfa_store.offset = 0
1507 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1508
1509 Rule 17:
1510 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1511 effects: cfa_store.offset += -/+ mode_size(mem)
1512
1513 Rule 18:
1514 (set (mem ({pre_inc, pre_dec} sp)) fp)
1515 constraints: fde->stack_realign == 1
1516 effects: cfa_store.offset = 0
1517 cfa.reg != HARD_FRAME_POINTER_REGNUM
1518
1519 Rule 19:
1520 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1521 constraints: fde->stack_realign == 1
1522 && cfa.offset == 0
1523 && cfa.indirect == 0
1524 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1525 effects: Use DW_CFA_def_cfa_expression to define cfa
1526 cfa.reg == fde->drap_reg */
1527
/* Apply the rule machinery documented above to the single SET (or
   PARALLEL/SEQUENCE of SETs) EXPR.  Rule numbers in the comments below
   refer to that commentary.  */
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility. Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them. */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments. */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      /* Second pass: everything that is not a register save (for a
	 SEQUENCE, also the memory stores, which the first pass did not
	 visit).  */
      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC was previously noted as saved in another register, describe
     this operation in terms of the original register.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP. */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP. Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP. So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns. */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register. */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window. */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used. In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information. */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP. */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* The adjustment amount was previously loaded into a
		     register tracked as cfa_temp (Rule 6/7/9).  */
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue. */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp. */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      /* OFFSET is the distance SP moves; a non-MINUS form adds,
		 so the CFA distance grows by the negated amount.  */
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on. */
		  offset = - INTVAL (XEXP (src, 1));
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack. */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack. */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cur_trace->cfa_temp.reg = dwf_regno (dest);
		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits. */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes. */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment. */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first. */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack. Make sure dest is relative to the
	 CFA register. */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push. */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications. */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* For POST_MODIFY the store address uses the pre-modification
	     SP, so the save slot sits OFFSET away from the new top.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset. */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Convert the base-relative offset to a CFA-relative one
	       according to which tracked base register is in use.  */
	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset. */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew. */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack. */

	  if (cur_cfa->offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead. The
		 result of this expression equals to the original CFA
		 value. */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM. */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA. */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL. */
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
1996
1997 /* Record call frame debugging information for INSN, which either sets
1998 SP or FP (adjusting how we calculate the frame address) or saves a
1999 register to the stack. */
2000
static void
dwarf2out_frame_debug (rtx_insn *insn)
{
  rtx note, n, pat;
  bool handled_one = false;

  /* REG_CFA_* notes take precedence over guessing from the pattern;
     if none is present we fall through to dwarf2out_frame_debug_expr
     on the raw pattern.  */
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	pat = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	n = XEXP (note, 0);
	/* A note without an operand means: use the (first SET of
	   the) insn pattern itself.  */
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;

      case REG_CFA_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_expression (n);
	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	    /* The restored register is the destination of the SET.  */
	    n = XEXP (n, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		if (REG_P (n))
		  fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_WINDOW_SAVE:
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens elsewhere. */
	handled_one = true;
	break;

      default:
	break;
      }

  if (!handled_one)
    {
      pat = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (pat);

      /* Check again. A parallel can save and update the same register.
	 We could probably check just once, here, but this is safer than
	 removing the check at the start of the function. */
      if (clobbers_queued_reg_save (pat))
	dwarf2out_flush_queued_reg_saves ();
    }
}
2114
2115 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2116
2117 static void
2118 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2119 {
2120 size_t i, n_old, n_new, n_max;
2121 dw_cfi_ref cfi;
2122
2123 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2124 add_cfi (new_row->cfa_cfi);
2125 else
2126 {
2127 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2128 if (cfi)
2129 add_cfi (cfi);
2130 }
2131
2132 n_old = vec_safe_length (old_row->reg_save);
2133 n_new = vec_safe_length (new_row->reg_save);
2134 n_max = MAX (n_old, n_new);
2135
2136 for (i = 0; i < n_max; ++i)
2137 {
2138 dw_cfi_ref r_old = NULL, r_new = NULL;
2139
2140 if (i < n_old)
2141 r_old = (*old_row->reg_save)[i];
2142 if (i < n_new)
2143 r_new = (*new_row->reg_save)[i];
2144
2145 if (r_old == r_new)
2146 ;
2147 else if (r_new == NULL)
2148 add_cfi_restore (i);
2149 else if (!cfi_equal_p (r_old, r_new))
2150 add_cfi (r_new);
2151 }
2152 }
2153
2154 /* Examine CFI and return true if a cfi label and set_loc is needed
2155 beforehand. Even when generating CFI assembler instructions, we
2156 still have to add the cfi to the list so that lookup_cfa_1 works
2157 later on. When -g2 and above we even need to force emitting of
2158 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2159 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2160 and so don't use convert_cfa_to_fb_loc_list. */
2161
2162 static bool
2163 cfi_label_required_p (dw_cfi_ref cfi)
2164 {
2165 if (!dwarf2out_do_cfi_asm ())
2166 return true;
2167
2168 if (dwarf_version == 2
2169 && debug_info_level > DINFO_LEVEL_TERSE
2170 && (write_symbols == DWARF2_DEBUG
2171 || write_symbols == VMS_AND_DWARF2_DEBUG))
2172 {
2173 switch (cfi->dw_cfi_opc)
2174 {
2175 case DW_CFA_def_cfa_offset:
2176 case DW_CFA_def_cfa_offset_sf:
2177 case DW_CFA_def_cfa_register:
2178 case DW_CFA_def_cfa:
2179 case DW_CFA_def_cfa_sf:
2180 case DW_CFA_def_cfa_expression:
2181 case DW_CFA_restore_state:
2182 return true;
2183 default:
2184 return false;
2185 }
2186 }
2187 return false;
2188 }
2189
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  Runs of consecutive CFI notes share a single label; a label
   is emitted only if at least one CFI in the run requires it
   (cfi_label_required_p).  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;
  /* We always start with a function_begin label, so an advance_loc from
     it is always possible; hence FIRST starts out false.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Remember where the second (cold) section's CFIs begin within
	     the FDE's CFI vector.  */
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Advance NEXT past the whole run of CFI notes (skipping
	     inactive insns), OR-ing in whether any member of the run
	     requires a label.  Stop at the next active insn or section
	     switch so the run gets a single shared label.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      /* Capture the label number BEFORE dwarf2out_cfi_label
		 increments it, so the emitted note matches the name.  */
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      /* After a section switch we cannot compute a delta, so use
		 an absolute set_loc instead of advance_loc4.  */
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI note in [INSN, NEXT) to the FDE, in order.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2256
/* If START is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  ORIGIN is the insn creating the
   edge (NULL for a fallthru edge) and is used only for dump output.
   On the first arrival the trace is pushed onto trace_work_list; on
   later arrivals the incoming row state must match the recorded one.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      /* Scratch state used by dwarf2out_frame_debug also flows along
	 the edge.  regs_saved_in_regs needs a deep copy since the
	 target trace will mutate its own vector.  */
      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2307
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.  Such edges unwind any pending outgoing
   argument pushes, so the target trace sees args_size of zero and,
   when the CFA is based on the stack pointer, a correspondingly
   adjusted CFA offset.  CUR_TRACE/CUR_ROW are temporarily mutated
   and restored before returning.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  /* Nothing pushed: the normal path needs no adjustment.  */
  if (save_args_size == 0)
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_row->cfa.offset += delta;
    }

  /* Record the edge with the adjusted state...  */
  maybe_record_trace_start (start, origin);

  /* ...then restore the true state for the remainder of this trace.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
2343
/* Propagate CUR_TRACE state to the destinations implied by INSN.
   Covers direct jumps, tablejumps, computed jumps, asm goto labels,
   non-local gotos from calls, SEQUENCEs (recursively), and EH landing
   pads.  Safe to call on non-control-flow insns; it simply creates no
   edges for them.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled via their CALL_INSN below.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge per entry in the dispatch table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may target any label whose address was taken.  */
	  for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
	    maybe_record_trace_start (lab->insn (), insn);
	}
      else if (returnjump_p (insn))
	;	/* Returns leave the function; no intra-function edge.  */
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Ordinary jump: single target label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot sequence: recurse over the branch and its slots.
	 The early return skips the EH check below; the recursive calls
	 perform it for each member insn instead.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2424
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN:
   record any frame-related CFI effects of INSN, then account for any
   REG_ARGS_SIZE change it carries.  */

static void
scan_insn_after (rtx_insn *insn)
{
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}
2434
/* Scan the trace beginning at TRACE->head and create the CFI notes for
   the instructions therein.  Sets up the global cursor state (cur_trace,
   cur_row, cur_cfa) for dwarf2out_frame_debug, walks until a barrier or
   the start of the next trace, and records outgoing edges as it goes.
   The globals are cleared again on exit.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The end state starts as a copy of the beginning state and is
     mutated in place as we scan.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* THIS_CFA is the CFA being tracked along the current execution path;
     cur_cfa may be temporarily redirected for annulled delay slots.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  /* Element 0 is the branch/call; the rest are delay slots.  */
	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo ELT's effects on the fall-through path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the global cursor state set up on entry.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2614
2615 /* Scan the function and create the initial set of CFI notes. */
2616
2617 static void
2618 create_cfi_notes (void)
2619 {
2620 dw_trace_info *ti;
2621
2622 gcc_checking_assert (!queued_reg_saves.exists ());
2623 gcc_checking_assert (!trace_work_list.exists ());
2624
2625 /* Always begin at the entry trace. */
2626 ti = &trace_info[0];
2627 scan_trace (ti);
2628
2629 while (!trace_work_list.is_empty ())
2630 {
2631 ti = trace_work_list.pop ();
2632 scan_trace (ti);
2633 }
2634
2635 queued_reg_saves.release ();
2636 trace_work_list.release ();
2637 }
2638
2639 /* Return the insn before the first NOTE_INSN_CFI after START. */
2640
2641 static rtx_insn *
2642 before_next_cfi_note (rtx_insn *start)
2643 {
2644 rtx_insn *prev = start;
2645 while (start)
2646 {
2647 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2648 return prev;
2649 prev = start;
2650 start = NEXT_INSN (start);
2651 }
2652 gcc_unreachable ();
2653 }
2654
/* Insert CFI notes between traces to properly change state between them.
   After all traces have been scanned independently, each trace boundary
   may need opcodes to convert the previous trace's end row into the next
   trace's begin row; this pass emits them, preferring remember/restore
   pairs for the common epilogue-sharing pattern.  Finally, args_size is
   reconnected across traces containing throwing insns.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  Walking backwards keeps
     the indices of not-yet-visited elements stable across removals.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* The restore brings us back to PREV_TI's begin state, so
		 change_cfi_row need only handle that (smaller) delta.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  /* Dump each CFI note emitted after TI->head by the calls above.  */
	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (new section) restarts args_size from zero.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2781
2782 /* Set up the pseudo-cfg of instruction traces, as described at the
2783 block comment at the top of the file. */
2784
2785 static void
2786 create_pseudo_cfg (void)
2787 {
2788 bool saw_barrier, switch_sections;
2789 dw_trace_info ti;
2790 rtx_insn *insn;
2791 unsigned i;
2792
2793 /* The first trace begins at the start of the function,
2794 and begins with the CIE row state. */
2795 trace_info.create (16);
2796 memset (&ti, 0, sizeof (ti));
2797 ti.head = get_insns ();
2798 ti.beg_row = cie_cfi_row;
2799 ti.cfa_store = cie_cfi_row->cfa;
2800 ti.cfa_temp.reg = INVALID_REGNUM;
2801 trace_info.quick_push (ti);
2802
2803 if (cie_return_save)
2804 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2805
2806 /* Walk all the insns, collecting start of trace locations. */
2807 saw_barrier = false;
2808 switch_sections = false;
2809 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2810 {
2811 if (BARRIER_P (insn))
2812 saw_barrier = true;
2813 else if (NOTE_P (insn)
2814 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2815 {
2816 /* We should have just seen a barrier. */
2817 gcc_assert (saw_barrier);
2818 switch_sections = true;
2819 }
2820 /* Watch out for save_point notes between basic blocks.
2821 In particular, a note after a barrier. Do not record these,
2822 delaying trace creation until the label. */
2823 else if (save_point_p (insn)
2824 && (LABEL_P (insn) || !saw_barrier))
2825 {
2826 memset (&ti, 0, sizeof (ti));
2827 ti.head = insn;
2828 ti.switch_sections = switch_sections;
2829 ti.id = trace_info.length ();
2830 trace_info.safe_push (ti);
2831
2832 saw_barrier = false;
2833 switch_sections = false;
2834 }
2835 }
2836
2837 /* Create the trace index after we've finished building trace_info,
2838 avoiding stale pointer problems due to reallocation. */
2839 trace_index
2840 = new hash_table<trace_info_hasher> (trace_info.length ());
2841 dw_trace_info *tp;
2842 FOR_EACH_VEC_ELT (trace_info, i, tp)
2843 {
2844 dw_trace_info **slot;
2845
2846 if (dump_file)
2847 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2848 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2849 tp->switch_sections ? " (section switch)" : "");
2850
2851 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2852 gcc_assert (*slot == NULL);
2853 *slot = tp;
2854 }
2855 }
2856
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  Emits the CIE rule for the return
   column: either DW_CFA_register (RA in a register) or DW_CFA_offset
   (RA on the stack), unless RA already lives in the return column.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decode the address to find the offset
	 from the incoming stack pointer; REG stays INVALID_REGNUM.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* RA is in a register other than the return column: note the
	 register copy so later stack saves of that register can be
	 redirected to the return column.  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      /* reg_save expects an offset relative to the CFA.  */
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2918
/* Compute the once-per-compilation CIE row state: the initial CFA
   (at SP plus INCOMING_FRAME_SP_OFFSET) and the initial location of
   the return address.  Results go into the cie_cfi_vec / cie_cfi_row
   globals; any RA-in-register rule is stashed in cie_return_save for
   per-function re-initialization.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so initial_return_save's helpers have a
     cur_trace to record into.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Clear the globals again so stray uses trap.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
2970
2971 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2972 state at each location within the function. These notes will be
2973 emitted during pass_final. */
2974
2975 static unsigned int
2976 execute_dwarf2_frame (void)
2977 {
2978 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
2979 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2980
2981 /* The first time we're called, compute the incoming frame state. */
2982 if (cie_cfi_vec == NULL)
2983 create_cie_data ();
2984
2985 dwarf2out_alloc_current_fde ();
2986
2987 create_pseudo_cfg ();
2988
2989 /* Do the work. */
2990 create_cfi_notes ();
2991 connect_traces ();
2992 add_cfis_to_fde ();
2993
2994 /* Free all the data we allocated. */
2995 {
2996 size_t i;
2997 dw_trace_info *ti;
2998
2999 FOR_EACH_VEC_ELT (trace_info, i, ti)
3000 ti->regs_saved_in_regs.release ();
3001 }
3002 trace_info.release ();
3003
3004 delete trace_index;
3005 trace_index = NULL;
3006
3007 return 0;
3008 }
3009 \f
3010 /* Convert a DWARF call frame info. operation to its string name */
3011
3012 static const char *
3013 dwarf_cfi_name (unsigned int cfi_opc)
3014 {
3015 const char *name = get_DW_CFA_name (cfi_opc);
3016
3017 if (name != NULL)
3018 return name;
3019
3020 return "DW_CFA_<unknown>";
3021 }
3022
3023 /* This routine will generate the correct assembly data for a location
3024 description based on a cfi entry with a complex address. */
3025
3026 static void
3027 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3028 {
3029 dw_loc_descr_ref loc;
3030 unsigned long size;
3031
3032 if (cfi->dw_cfi_opc == DW_CFA_expression)
3033 {
3034 unsigned r =
3035 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3036 dw2_asm_output_data (1, r, NULL);
3037 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3038 }
3039 else
3040 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3041
3042 /* Output the size of the block. */
3043 size = size_of_locs (loc);
3044 dw2_asm_output_data_uleb128 (size, NULL);
3045
3046 /* Now output the operations themselves. */
3047 output_loc_sequence (loc, for_eh);
3048 }
3049
3050 /* Similar, but used for .cfi_escape. */
3051
3052 static void
3053 output_cfa_loc_raw (dw_cfi_ref cfi)
3054 {
3055 dw_loc_descr_ref loc;
3056 unsigned long size;
3057
3058 if (cfi->dw_cfi_opc == DW_CFA_expression)
3059 {
3060 unsigned r =
3061 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3062 fprintf (asm_out_file, "%#x,", r);
3063 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3064 }
3065 else
3066 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3067
3068 /* Output the size of the block. */
3069 size = size_of_locs (loc);
3070 dw2_asm_output_data_uleb128_raw (size);
3071 fputc (',', asm_out_file);
3072
3073 /* Now output the operations themselves. */
3074 output_loc_sequence_raw (loc);
3075 }
3076
/* Output a Call Frame Information opcode and its operand(s) to the
   assembly stream, for the FDE given.  FOR_EH selects the eh_frame
   register mapping and address encodings.  The three opcodes that
   embed an operand in their low 6 bits (advance_loc, offset, restore)
   are handled first; everything else emits the opcode byte followed
   by its operands per the DWARF CFI encoding.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    /* Delta is folded into the low 6 bits of the opcode byte.  */
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      /* Register in the low 6 bits, then data-aligned offset as ULEB.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      /* Register in the low 6 bits of the opcode byte.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_locN forms emit a delta from the previous label
	   in N bytes, and each updates the FDE's current label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  /* Register as ULEB, then data-aligned offset as ULEB.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* Register as ULEB, then NON-factored offset as ULEB.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  /* As offset_extended, but the factored offset is signed.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  /* As def_cfa, but the offset is data-aligned and signed.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These four all take a single register operand as ULEB.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  /* Two registers: the saved column and where it now lives.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3217
/* Similar, but do it via assembler directives instead.

   Output the CFI instruction CFI to stream F as a GNU assembler
   .cfi_* directive.  F is normally asm_out_file; the routine is also
   used for debugging dumps (see dump_cfi_row), in which case opcodes
   that have no textual directive form are printed as human-readable
   pseudo-directives instead.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* The offset is printed unfactored; the assembler applies the
	 data alignment factor itself, unlike the binary encoding in
	 output_cfi, which emits div_data_align'd values.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no assembler directive for this opcode; when emitting
	 real assembly, encode the raw DWARF opcode and operand bytes
	 via .cfi_escape.  For debugging dumps, print a readable
	 pseudo-directive instead.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
	  break;
	}
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
	  break;
	}
      /* Expression opcodes likewise have no directive form; emit the
	 raw opcode followed by the location expression bytes.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3340
3341 void
3342 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3343 {
3344 if (dwarf2out_do_cfi_asm ())
3345 output_cfi_directive (asm_out_file, cfi);
3346 }
3347
3348 static void
3349 dump_cfi_row (FILE *f, dw_cfi_row *row)
3350 {
3351 dw_cfi_ref cfi;
3352 unsigned i;
3353
3354 cfi = row->cfa_cfi;
3355 if (!cfi)
3356 {
3357 dw_cfa_location dummy;
3358 memset (&dummy, 0, sizeof (dummy));
3359 dummy.reg = INVALID_REGNUM;
3360 cfi = def_cfa_0 (&dummy, &row->cfa);
3361 }
3362 output_cfi_directive (f, cfi);
3363
3364 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3365 if (cfi)
3366 output_cfi_directive (f, cfi);
3367 }
3368
/* Declared here rather than in a header: this entry point exists to be
   called by hand from a debugger session.  */
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr.  Debugger convenience wrapper for
   dump_cfi_row.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3376 \f
3377
/* Cached result of dwarf2out_do_cfi_asm, saved across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.
   It is also consulted by dwarf2out_do_frame, since emitting unwind
   info via assembler directives implies emitting frame info.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3381
3382 /* Decide whether we want to emit frame unwind information for the current
3383 translation unit. */
3384
3385 bool
3386 dwarf2out_do_frame (void)
3387 {
3388 /* We want to emit correct CFA location expressions or lists, so we
3389 have to return true if we're going to output debug info, even if
3390 we're not going to output frame or unwind info. */
3391 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3392 return true;
3393
3394 if (saved_do_cfi_asm > 0)
3395 return true;
3396
3397 if (targetm.debug_unwind_info () == UI_DWARF2)
3398 return true;
3399
3400 if ((flag_unwind_tables || flag_exceptions)
3401 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3402 return true;
3403
3404 return false;
3405 }
3406
/* Decide whether to emit frame unwind via assembler directives.
   The answer is computed once and cached in saved_do_cfi_asm (which
   also persists across PCH).  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* Return the cached answer if we have been here before.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  Every early return below then
     leaves the negative answer cached.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  The 0x70 mask
     extracts the address-application bits of the DW_EH_PE_* encoding;
     only absolute (0) and pc-relative survive.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3445
3446 namespace {
3447
3448 const pass_data pass_data_dwarf2_frame =
3449 {
3450 RTL_PASS, /* type */
3451 "dwarf2", /* name */
3452 OPTGROUP_NONE, /* optinfo_flags */
3453 TV_FINAL, /* tv_id */
3454 0, /* properties_required */
3455 0, /* properties_provided */
3456 0, /* properties_destroyed */
3457 0, /* todo_flags_start */
3458 0, /* todo_flags_finish */
3459 };
3460
3461 class pass_dwarf2_frame : public rtl_opt_pass
3462 {
3463 public:
3464 pass_dwarf2_frame (gcc::context *ctxt)
3465 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3466 {}
3467
3468 /* opt_pass methods: */
3469 virtual bool gate (function *);
3470 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3471
3472 }; // class pass_dwarf2_frame
3473
3474 bool
3475 pass_dwarf2_frame::gate (function *)
3476 {
3477 /* Targets which still implement the prologue in assembler text
3478 cannot use the generic dwarf2 unwinding. */
3479 if (!targetm.have_prologue ())
3480 return false;
3481
3482 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3483 from the optimized shrink-wrapping annotations that we will compute.
3484 For now, only produce the CFI notes for dwarf2. */
3485 return dwarf2out_do_frame ();
3486 }
3487
3488 } // anon namespace
3489
/* Factory for the pass manager: create a fresh instance of the dwarf2
   frame pass in context CTXT.  Caller takes ownership.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3495
3496 #include "gt-dwarf2cfi.h"