]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/dwarf2cfi.c
dwarf2cfi: Dump row differences before asserting
[thirdparty/gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
36
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "expr.h" /* init_return_column_size */
39 #include "output.h" /* asm_out_file */
40 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
41
42
43 /* ??? Poison these here until it can be done generically. They've been
44 totally replaced in this file; make sure it stays that way. */
45 #undef DWARF2_UNWIND_INFO
46 #undef DWARF2_FRAME_INFO
47 #if (GCC_VERSION >= 3000)
48 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
49 #endif
50
51 #ifndef INCOMING_RETURN_ADDR_RTX
52 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
53 #endif
54
55 /* Maximum size (in bytes) of an artificially generated label. */
56 #define MAX_ARTIFICIAL_LABEL_BYTES 30
57 \f
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF column number; a NULL element means the column is not saved.  */
  cfi_vec reg_save;
};
70
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  Used to record
   register-to-register saves so that a later save of SAVED_IN_REG can be
   redirected back to ORIG_REG.  */
struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
};
76
77
/* Since we no longer have a proper CFG, we're going to create a facsimile
   of one on the fly while processing the frame-related insns.

   We create dw_trace_info structures for each extended basic block beginning
   and ending at a "save point".  Save points are labels, barriers, certain
   notes, and of course the beginning and end of the function.

   As we encounter control transfer insns, we propagate the "current"
   row state across the edges to the starts of traces.  When checking is
   enabled, we validate that we propagate the same data from all sources.

   All traces are members of the TRACE_INFO array, in the order in which
   they appear in the instruction stream.

   All save points are present in the TRACE_INDEX hash, mapping the insn
   starting a trace to the dw_trace_info describing the trace.  */

struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};
149
150
/* Hashtable helpers.  Traces are hashed and compared purely on the
   identity of their head insn.  */

struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

/* Hash a trace by the UID of its head insn, matching the explicit hash
   passed to find_with_hash in get_trace_info.  */
inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

/* Two traces are the same iff they start at the same insn.  */
inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
170
171
/* The variables making up the pseudo-cfg, as described above.  */

/* All traces, in the order they appear in the instruction stream.  */
static vec<dw_trace_info> trace_info;

/* Traces whose incoming row state has been computed but whose bodies
   have not yet been scanned.  */
static vec<dw_trace_info *> trace_work_list;

/* Map from the head insn of a trace to its dw_trace_info.  */
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* The CIE's record of where the return address is saved, if any.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter feeding dwarf2out_cfi_label's generated LCFI labels.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

/* REG is saved at CFA_OFFSET from the CFA, or in SAVED_REG when
   SAVED_REG is non-null (see queue_reg_save).  */
struct queued_reg_save {
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
};

/* Pending register saves, flushed by dwarf2out_flush_queued_reg_saves.  */
static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
223 \f
/* Hook used by __throw.  Return the stack pointer's DWARF column number,
   after applying the target's outgoing register-number mapping, as a
   constant rtx.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}
232
/* MEM is a memory reference for the register size table, each element of
   which has mode MODE.  Initialize column C as a return address column;
   its size entry is the size of a pointer (Pmode).  */

static void
init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
{
  /* Table slots are MODE-sized, so column C lives at byte C * size(MODE).  */
  HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
  HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
  emit_move_insn (adjust_address (mem, mode, offset),
		  gen_int_mode (size, mode));
}
244
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};
259
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO processed unconditionally, before any of the filtering
     below, so the caller never hands it to us a second time.  */
  init_state->processed_regno[regno] = true;

  /* The table only has DWARF_FRAME_REGISTERS slots.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      /* NOTE(review): a VOIDmode return column is skipped without setting
	 wrote_return_column, so the caller will emit the default pointer
	 size for it — presumably the intended fallback; confirm against
	 targets that use VOIDmode here.  */
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* Negative offsets can arise from the unwind-column mapping; such
     columns have no slot to write.  */
  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
296
297 /* Generate code to initialize the dwarf register size table located
298 at the provided ADDRESS. */
299
300 void
301 expand_builtin_init_dwarf_reg_sizes (tree address)
302 {
303 unsigned int i;
304 machine_mode mode = TYPE_MODE (char_type_node);
305 rtx addr = expand_normal (address);
306 rtx mem = gen_rtx_MEM (BLKmode, addr);
307
308 init_one_dwarf_reg_state init_state;
309
310 memset ((char *)&init_state, 0, sizeof (init_state));
311
312 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
313 {
314 machine_mode save_mode;
315 rtx span;
316
317 /* No point in processing a register multiple times. This could happen
318 with register spans, e.g. when a reg is first processed as a piece of
319 a span, then as a register on its own later on. */
320
321 if (init_state.processed_regno[i])
322 continue;
323
324 save_mode = targetm.dwarf_frame_reg_mode (i);
325 span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
326
327 if (!span)
328 init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
329 else
330 {
331 for (int si = 0; si < XVECLEN (span, 0); si++)
332 {
333 rtx reg = XVECEXP (span, 0, si);
334
335 init_one_dwarf_reg_size
336 (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
337 }
338 }
339 }
340
341 if (!init_state.wrote_return_column)
342 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
343
344 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
345 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
346 #endif
347
348 targetm.init_dwarf_reg_sizes_extra (address);
349 }
350
351 \f
/* Return the dw_trace_info for the trace beginning at INSN.  INSN must
   already be present in TRACE_INDEX.  */

static dw_trace_info *
get_trace_info (rtx_insn *insn)
{
  /* Build a key on the stack; only the head field participates in
     hashing/equality (see trace_info_hasher).  */
  dw_trace_info dummy;
  dummy.head = insn;
  return trace_index->find_with_hash (&dummy, INSN_UID (insn));
}
359
360 static bool
361 save_point_p (rtx_insn *insn)
362 {
363 /* Labels, except those that are really jump tables. */
364 if (LABEL_P (insn))
365 return inside_basic_block_p (insn);
366
367 /* We split traces at the prologue/epilogue notes because those
368 are points at which the unwind info is usually stable. This
369 makes it easier to find spots with identical unwind info so
370 that we can use remember/restore_state opcodes. */
371 if (NOTE_P (insn))
372 switch (NOTE_KIND (insn))
373 {
374 case NOTE_INSN_PROLOGUE_END:
375 case NOTE_INSN_EPILOGUE_BEG:
376 return true;
377 }
378
379 return false;
380 }
381
382 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
383
384 static inline HOST_WIDE_INT
385 div_data_align (HOST_WIDE_INT off)
386 {
387 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
388 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
389 return r;
390 }
391
392 /* Return true if we need a signed version of a given opcode
393 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
394
395 static inline bool
396 need_data_align_sf_opcode (HOST_WIDE_INT off)
397 {
398 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
399 }
400
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  /* Zero both operands; the caller sets the opcode and whichever
     operand fields that opcode actually uses.  */
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
413
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  /* An INVALID_REGNUM CFA register marks the CFA as not yet defined.  */
  row->cfa.reg = INVALID_REGNUM;

  return row;
}
425
/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  /* Structure copy, then deep-copy the reg_save vector so the two rows
     can be updated independently.  */
  *dst = *src;
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}
438
439 /* Generate a new label for the CFI info to refer to. */
440
441 static char *
442 dwarf2out_cfi_label (void)
443 {
444 int num = dwarf2out_cfi_label_num++;
445 char label[20];
446
447 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
448
449 return xstrdup (label);
450 }
451
/* Add CFI either to the current insn stream or to a vector, or both.
   The destinations are controlled by the file-scope ADD_CFI_INSN and
   ADD_CFI_VEC pointers; also records that something was emitted in
   ANY_CFIS_EMITTED.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      /* Attach the CFI as a note and advance the insertion point so
	 subsequent CFIs stay in emission order.  */
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
468
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE as the current
   amount of outgoing argument stack.  */

static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}
483
484 static void
485 add_cfi_restore (unsigned reg)
486 {
487 dw_cfi_ref cfi = new_cfi ();
488
489 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
490 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
491
492 add_cfi (cfi);
493 }
494
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector on demand; new elements are cleared (NULL), which
     matches the "not saved" representation.  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
505
506 /* This function fills in aa dw_cfa_location structure from a dwarf location
507 descriptor sequence. */
508
509 static void
510 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
511 {
512 struct dw_loc_descr_node *ptr;
513 cfa->offset = 0;
514 cfa->base_offset = 0;
515 cfa->indirect = 0;
516 cfa->reg = -1;
517
518 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
519 {
520 enum dwarf_location_atom op = ptr->dw_loc_opc;
521
522 switch (op)
523 {
524 case DW_OP_reg0:
525 case DW_OP_reg1:
526 case DW_OP_reg2:
527 case DW_OP_reg3:
528 case DW_OP_reg4:
529 case DW_OP_reg5:
530 case DW_OP_reg6:
531 case DW_OP_reg7:
532 case DW_OP_reg8:
533 case DW_OP_reg9:
534 case DW_OP_reg10:
535 case DW_OP_reg11:
536 case DW_OP_reg12:
537 case DW_OP_reg13:
538 case DW_OP_reg14:
539 case DW_OP_reg15:
540 case DW_OP_reg16:
541 case DW_OP_reg17:
542 case DW_OP_reg18:
543 case DW_OP_reg19:
544 case DW_OP_reg20:
545 case DW_OP_reg21:
546 case DW_OP_reg22:
547 case DW_OP_reg23:
548 case DW_OP_reg24:
549 case DW_OP_reg25:
550 case DW_OP_reg26:
551 case DW_OP_reg27:
552 case DW_OP_reg28:
553 case DW_OP_reg29:
554 case DW_OP_reg30:
555 case DW_OP_reg31:
556 cfa->reg = op - DW_OP_reg0;
557 break;
558 case DW_OP_regx:
559 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
560 break;
561 case DW_OP_breg0:
562 case DW_OP_breg1:
563 case DW_OP_breg2:
564 case DW_OP_breg3:
565 case DW_OP_breg4:
566 case DW_OP_breg5:
567 case DW_OP_breg6:
568 case DW_OP_breg7:
569 case DW_OP_breg8:
570 case DW_OP_breg9:
571 case DW_OP_breg10:
572 case DW_OP_breg11:
573 case DW_OP_breg12:
574 case DW_OP_breg13:
575 case DW_OP_breg14:
576 case DW_OP_breg15:
577 case DW_OP_breg16:
578 case DW_OP_breg17:
579 case DW_OP_breg18:
580 case DW_OP_breg19:
581 case DW_OP_breg20:
582 case DW_OP_breg21:
583 case DW_OP_breg22:
584 case DW_OP_breg23:
585 case DW_OP_breg24:
586 case DW_OP_breg25:
587 case DW_OP_breg26:
588 case DW_OP_breg27:
589 case DW_OP_breg28:
590 case DW_OP_breg29:
591 case DW_OP_breg30:
592 case DW_OP_breg31:
593 cfa->reg = op - DW_OP_breg0;
594 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
595 break;
596 case DW_OP_bregx:
597 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
598 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
599 break;
600 case DW_OP_deref:
601 cfa->indirect = 1;
602 break;
603 case DW_OP_plus_uconst:
604 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
605 break;
606 default:
607 gcc_unreachable ();
608 }
609 }
610 }
611
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Offset changes only; the register is unchanged.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Register changes only; the offset is unchanged.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the location expression back into reg/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of remembered state is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Opcodes that do not affect the CFA are ignored.  */
      break;
    }
}
652
653 /* Determine if two dw_cfa_location structures define the same data. */
654
655 bool
656 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
657 {
658 return (loc1->reg == loc2->reg
659 && loc1->offset == loc2->offset
660 && loc1->indirect == loc2->indirect
661 && (loc1->indirect == 0
662 || loc1->base_offset == loc2->base_offset));
663 }
664
/* Determine if two CFI operands are identical.  T selects which member
   of the dw_cfi_oprnd union is active in both A and B.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Pointer equality short-circuits the string compare.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
686
687 /* Determine if two CFI entries are identical. */
688
689 static bool
690 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
691 {
692 enum dwarf_call_frame_info opc;
693
694 /* Make things easier for our callers, including missing operands. */
695 if (a == b)
696 return true;
697 if (a == NULL || b == NULL)
698 return false;
699
700 /* Obviously, the opcodes must match. */
701 opc = a->dw_cfi_opc;
702 if (opc != b->dw_cfi_opc)
703 return false;
704
705 /* Compare the two operands, re-using the type of the operands as
706 already exposed elsewhere. */
707 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
708 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
709 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
710 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
711 }
712
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  /* NOTE(review): the CFA comparison is driven by which representation A
     uses — expression vs reg/offset; if A has no expression but B does,
     only the reg/offset forms are compared.  Presumably both sides use
     the same representation in practice; confirm with callers.  */
  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  /* Compare register saves column by column; a column past the end of
     the shorter vector counts as "not saved" (NULL).  */
  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
747
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  The most compact applicable encoding is
   chosen: offset-only, register-only, register+offset, or a full
   expression as a last resort.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  Note this form is not usable when the
	 old CFA is still undefined (INVALID_REGNUM).  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
814
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the trace's register-save base in sync when the CFA is being
     redefined off the same register.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Remember the expression CFI only for the expression case; the
	 simple forms are fully described by cur_row->cfa itself.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
835
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  Also records the save in CUR_ROW.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved at OFFSET from the CFA.  Pick the most compact encoding:
	 signed form if factoring would go negative, extended form for
	 columns >= 64, else the one-byte DW_CFA_offset.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
886
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
921
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn in the trace: record it; the actual args_size
	 opcode for this point is delayed (see dw_trace_info).  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
948
/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  /* Only hard registers have DWARF column numbers.  */
  gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
  return DWARF_FRAME_REGNUM (REGNO (reg));
}
959
960 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
961
962 static bool
963 compare_reg_or_pc (rtx x, rtx y)
964 {
965 if (REG_P (x) && REG_P (y))
966 return REGNO (x) == REGNO (y);
967 return x == y;
968 }
969
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update or remove it in place.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  /* Order within the vector does not matter, so use the O(1)
	     removal.  */
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Deleting an entry that does not exist is a no-op.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
995
996 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
997 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
998
999 static void
1000 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1001 {
1002 queued_reg_save *q;
1003 queued_reg_save e = {reg, sreg, offset};
1004 size_t i;
1005
1006 /* Duplicates waste space, but it's also necessary to remove them
1007 for correctness, since the queue gets output in reverse order. */
1008 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1009 if (compare_reg_or_pc (q->reg, reg))
1010 {
1011 *q = e;
1012 return;
1013 }
1014
1015 queued_reg_saves.safe_push (e);
1016 }
1017
/* Output all the entries in QUEUED_REG_SAVES.  Each queued entry is
   translated to DWARF column numbers, mirrored into the trace's
   reg-saved-in-reg data, and emitted via reg_save; the queue is then
   emptied.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      /* Keep the register-to-register inverse map up to date (a NULL
	 saved_reg deletes any stale entry).  */
      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
1045
1046 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1047 location for? Or, does it clobber a register which we've previously
1048 said that some other register is saved in, and for which we now
1049 have a new location for? */
1050
1051 static bool
1052 clobbers_queued_reg_save (const_rtx insn)
1053 {
1054 queued_reg_save *q;
1055 size_t iq;
1056
1057 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1058 {
1059 size_t ir;
1060 reg_saved_in_data *rir;
1061
1062 if (modified_in_p (q->reg, insn))
1063 return true;
1064
1065 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1066 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1067 && modified_in_p (rir->saved_in_reg, insn))
1068 return true;
1069 }
1070
1071 return false;
1072 }
1073
1074 /* What register, if any, is currently saved in REG? */
1075
1076 static rtx
1077 reg_saved_in (rtx reg)
1078 {
1079 unsigned int regn = REGNO (reg);
1080 queued_reg_save *q;
1081 reg_saved_in_data *rir;
1082 size_t i;
1083
1084 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1085 if (q->saved_reg && regn == REGNO (q->saved_reg))
1086 return q->reg;
1087
1088 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1089 if (regn == REGNO (rir->saved_in_reg))
1090 return rir->orig_reg;
1091
1092 return NULL_RTX;
1093 }
1094
1095 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1096
1097 static void
1098 dwarf2out_frame_debug_def_cfa (rtx pat)
1099 {
1100 memset (cur_cfa, 0, sizeof (*cur_cfa));
1101
1102 if (GET_CODE (pat) == PLUS)
1103 {
1104 cur_cfa->offset = INTVAL (XEXP (pat, 1));
1105 pat = XEXP (pat, 0);
1106 }
1107 if (MEM_P (pat))
1108 {
1109 cur_cfa->indirect = 1;
1110 pat = XEXP (pat, 0);
1111 if (GET_CODE (pat) == PLUS)
1112 {
1113 cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1114 pat = XEXP (pat, 0);
1115 }
1116 }
1117 /* ??? If this fails, we could be calling into the _loc functions to
1118 define a full expression. So far no port does that. */
1119 gcc_assert (REG_P (pat));
1120 cur_cfa->reg = dwf_regno (pat);
1121 }
1122
1123 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1124
1125 static void
1126 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1127 {
1128 rtx src, dest;
1129
1130 gcc_assert (GET_CODE (pat) == SET);
1131 dest = XEXP (pat, 0);
1132 src = XEXP (pat, 1);
1133
1134 switch (GET_CODE (src))
1135 {
1136 case PLUS:
1137 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1138 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1139 break;
1140
1141 case REG:
1142 break;
1143
1144 default:
1145 gcc_unreachable ();
1146 }
1147
1148 cur_cfa->reg = dwf_regno (dest);
1149 gcc_assert (cur_cfa->indirect == 0);
1150 }
1151
1152 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1153
1154 static void
1155 dwarf2out_frame_debug_cfa_offset (rtx set)
1156 {
1157 HOST_WIDE_INT offset;
1158 rtx src, addr, span;
1159 unsigned int sregno;
1160
1161 src = XEXP (set, 1);
1162 addr = XEXP (set, 0);
1163 gcc_assert (MEM_P (addr));
1164 addr = XEXP (addr, 0);
1165
1166 /* As documented, only consider extremely simple addresses. */
1167 switch (GET_CODE (addr))
1168 {
1169 case REG:
1170 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1171 offset = -cur_cfa->offset;
1172 break;
1173 case PLUS:
1174 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1175 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1176 break;
1177 default:
1178 gcc_unreachable ();
1179 }
1180
1181 if (src == pc_rtx)
1182 {
1183 span = NULL;
1184 sregno = DWARF_FRAME_RETURN_COLUMN;
1185 }
1186 else
1187 {
1188 span = targetm.dwarf_register_span (src);
1189 sregno = dwf_regno (src);
1190 }
1191
1192 /* ??? We'd like to use queue_reg_save, but we need to come up with
1193 a different flushing heuristic for epilogues. */
1194 if (!span)
1195 reg_save (sregno, INVALID_REGNUM, offset);
1196 else
1197 {
1198 /* We have a PARALLEL describing where the contents of SRC live.
1199 Adjust the offset for each piece of the PARALLEL. */
1200 HOST_WIDE_INT span_offset = offset;
1201
1202 gcc_assert (GET_CODE (span) == PARALLEL);
1203
1204 const int par_len = XVECLEN (span, 0);
1205 for (int par_index = 0; par_index < par_len; par_index++)
1206 {
1207 rtx elem = XVECEXP (span, 0, par_index);
1208 sregno = dwf_regno (src);
1209 reg_save (sregno, INVALID_REGNUM, span_offset);
1210 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1211 }
1212 }
1213 }
1214
1215 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1216
1217 static void
1218 dwarf2out_frame_debug_cfa_register (rtx set)
1219 {
1220 rtx src, dest;
1221 unsigned sregno, dregno;
1222
1223 src = XEXP (set, 1);
1224 dest = XEXP (set, 0);
1225
1226 record_reg_saved_in_reg (dest, src);
1227 if (src == pc_rtx)
1228 sregno = DWARF_FRAME_RETURN_COLUMN;
1229 else
1230 sregno = dwf_regno (src);
1231
1232 dregno = dwf_regno (dest);
1233
1234 /* ??? We'd like to use queue_reg_save, but we need to come up with
1235 a different flushing heuristic for epilogues. */
1236 reg_save (sregno, dregno, 0);
1237 }
1238
1239 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1240
1241 static void
1242 dwarf2out_frame_debug_cfa_expression (rtx set)
1243 {
1244 rtx src, dest, span;
1245 dw_cfi_ref cfi = new_cfi ();
1246 unsigned regno;
1247
1248 dest = SET_DEST (set);
1249 src = SET_SRC (set);
1250
1251 gcc_assert (REG_P (src));
1252 gcc_assert (MEM_P (dest));
1253
1254 span = targetm.dwarf_register_span (src);
1255 gcc_assert (!span);
1256
1257 regno = dwf_regno (src);
1258
1259 cfi->dw_cfi_opc = DW_CFA_expression;
1260 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1261 cfi->dw_cfi_oprnd2.dw_cfi_loc
1262 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1263 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1264
1265 /* ??? We'd like to use queue_reg_save, were the interface different,
1266 and, as above, we could manage flushing for epilogues. */
1267 add_cfi (cfi);
1268 update_row_reg_save (cur_row, regno, cfi);
1269 }
1270
1271 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1272 note. */
1273
1274 static void
1275 dwarf2out_frame_debug_cfa_val_expression (rtx set)
1276 {
1277 rtx dest = SET_DEST (set);
1278 gcc_assert (REG_P (dest));
1279
1280 rtx span = targetm.dwarf_register_span (dest);
1281 gcc_assert (!span);
1282
1283 rtx src = SET_SRC (set);
1284 dw_cfi_ref cfi = new_cfi ();
1285 cfi->dw_cfi_opc = DW_CFA_val_expression;
1286 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
1287 cfi->dw_cfi_oprnd2.dw_cfi_loc
1288 = mem_loc_descriptor (src, GET_MODE (src),
1289 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1290 add_cfi (cfi);
1291 update_row_reg_save (cur_row, dwf_regno (dest), cfi);
1292 }
1293
1294 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1295
1296 static void
1297 dwarf2out_frame_debug_cfa_restore (rtx reg)
1298 {
1299 gcc_assert (REG_P (reg));
1300
1301 rtx span = targetm.dwarf_register_span (reg);
1302 if (!span)
1303 {
1304 unsigned int regno = dwf_regno (reg);
1305 add_cfi_restore (regno);
1306 update_row_reg_save (cur_row, regno, NULL);
1307 }
1308 else
1309 {
1310 /* We have a PARALLEL describing where the contents of REG live.
1311 Restore the register for each piece of the PARALLEL. */
1312 gcc_assert (GET_CODE (span) == PARALLEL);
1313
1314 const int par_len = XVECLEN (span, 0);
1315 for (int par_index = 0; par_index < par_len; par_index++)
1316 {
1317 reg = XVECEXP (span, 0, par_index);
1318 gcc_assert (REG_P (reg));
1319 unsigned int regno = dwf_regno (reg);
1320 add_cfi_restore (regno);
1321 update_row_reg_save (cur_row, regno, NULL);
1322 }
1323 }
1324 }
1325
1326 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1327 ??? Perhaps we should note in the CIE where windows are saved (instead of
1328 assuming 0(cfa)) and what registers are in the window. */
1329
1330 static void
1331 dwarf2out_frame_debug_cfa_window_save (void)
1332 {
1333 dw_cfi_ref cfi = new_cfi ();
1334
1335 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1336 add_cfi (cfi);
1337 }
1338
1339 /* Record call frame debugging information for an expression EXPR,
1340 which either sets SP or FP (adjusting how we calculate the frame
1341 address) or saves a register to the stack or another register.
1342 LABEL indicates the address of EXPR.
1343
1344 This function encodes a state machine mapping rtxes to actions on
1345 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1346 users need not read the source code.
1347
1348 The High-Level Picture
1349
1350 Changes in the register we use to calculate the CFA: Currently we
1351 assume that if you copy the CFA register into another register, we
1352 should take the other one as the new CFA register; this seems to
1353 work pretty well. If it's wrong for some target, it's simple
1354 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1355
1356 Changes in the register we use for saving registers to the stack:
1357 This is usually SP, but not always. Again, we deduce that if you
1358 copy SP into another register (and SP is not the CFA register),
1359 then the new register is the one we will be using for register
1360 saves. This also seems to work.
1361
1362 Register saves: There's not much guesswork about this one; if
1363 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1364 register save, and the register used to calculate the destination
1365 had better be the one we think we're using for this purpose.
1366 It's also assumed that a copy from a call-saved register to another
1367 register is saving that register if RTX_FRAME_RELATED_P is set on
1368 that instruction. If the copy is from a call-saved register to
1369 the *same* register, that means that the register is now the same
1370 value as in the caller.
1371
1372 Except: If the register being saved is the CFA register, and the
1373 offset is nonzero, we are saving the CFA, so we assume we have to
1374 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1375 the intent is to save the value of SP from the previous frame.
1376
   In addition, if a register has previously been saved to a different
   register, a later copy out of that saved-in register is treated as a
   reference to the original register (see reg_saved_in).
1379
1380 Invariants / Summaries of Rules
1381
1382 cfa current rule for calculating the CFA. It usually
1383 consists of a register and an offset. This is
1384 actually stored in *cur_cfa, but abbreviated
1385 for the purposes of this documentation.
1386 cfa_store register used by prologue code to save things to the stack
1387 cfa_store.offset is the offset from the value of
1388 cfa_store.reg to the actual CFA
1389 cfa_temp register holding an integral value. cfa_temp.offset
1390 stores the value, which will be used to adjust the
1391 stack pointer. cfa_temp is also used like cfa_store,
1392 to track stores to the stack via fp or a temp reg.
1393
1394 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1395 with cfa.reg as the first operand changes the cfa.reg and its
1396 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1397 cfa_temp.offset.
1398
1399 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1400 expression yielding a constant. This sets cfa_temp.reg
1401 and cfa_temp.offset.
1402
1403 Rule 5: Create a new register cfa_store used to save items to the
1404 stack.
1405
1406 Rules 10-14: Save a register to the stack. Define offset as the
1407 difference of the original location and cfa_store's
1408 location (or cfa_temp's location if cfa_temp is used).
1409
1410 Rules 16-20: If AND operation happens on sp in prologue, we assume
1411 stack is realigned. We will use a group of DW_OP_XXX
1412 expressions to represent the location of the stored
1413 register instead of CFA+offset.
1414
1415 The Rules
1416
1417 "{a,b}" indicates a choice of a xor b.
1418 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1419
1420 Rule 1:
1421 (set <reg1> <reg2>:cfa.reg)
1422 effects: cfa.reg = <reg1>
1423 cfa.offset unchanged
1424 cfa_temp.reg = <reg1>
1425 cfa_temp.offset = cfa.offset
1426
1427 Rule 2:
1428 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1429 {<const_int>,<reg>:cfa_temp.reg}))
1430 effects: cfa.reg = sp if fp used
1431 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1432 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1433 if cfa_store.reg==sp
1434
1435 Rule 3:
1436 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1437 effects: cfa.reg = fp
1438 cfa_offset += +/- <const_int>
1439
1440 Rule 4:
1441 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1442 constraints: <reg1> != fp
1443 <reg1> != sp
1444 effects: cfa.reg = <reg1>
1445 cfa_temp.reg = <reg1>
1446 cfa_temp.offset = cfa.offset
1447
1448 Rule 5:
1449 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1450 constraints: <reg1> != fp
1451 <reg1> != sp
1452 effects: cfa_store.reg = <reg1>
1453 cfa_store.offset = cfa.offset - cfa_temp.offset
1454
1455 Rule 6:
1456 (set <reg> <const_int>)
1457 effects: cfa_temp.reg = <reg>
1458 cfa_temp.offset = <const_int>
1459
1460 Rule 7:
1461 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1462 effects: cfa_temp.reg = <reg1>
1463 cfa_temp.offset |= <const_int>
1464
1465 Rule 8:
1466 (set <reg> (high <exp>))
1467 effects: none
1468
1469 Rule 9:
1470 (set <reg> (lo_sum <exp> <const_int>))
1471 effects: cfa_temp.reg = <reg>
1472 cfa_temp.offset = <const_int>
1473
1474 Rule 10:
1475 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1476 effects: cfa_store.offset -= <const_int>
1477 cfa.offset = cfa_store.offset if cfa.reg == sp
1478 cfa.reg = sp
1479 cfa.base_offset = -cfa_store.offset
1480
1481 Rule 11:
1482 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1483 effects: cfa_store.offset += -/+ mode_size(mem)
1484 cfa.offset = cfa_store.offset if cfa.reg == sp
1485 cfa.reg = sp
1486 cfa.base_offset = -cfa_store.offset
1487
1488 Rule 12:
1489 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1490
1491 <reg2>)
1492 effects: cfa.reg = <reg1>
1493 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1494
1495 Rule 13:
1496 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1497 effects: cfa.reg = <reg1>
1498 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1499
1500 Rule 14:
1501 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1502 effects: cfa.reg = <reg1>
1503 cfa.base_offset = -cfa_temp.offset
1504 cfa_temp.offset -= mode_size(mem)
1505
1506 Rule 15:
1507 (set <reg> {unspec, unspec_volatile})
1508 effects: target-dependent
1509
1510 Rule 16:
1511 (set sp (and: sp <const_int>))
1512 constraints: cfa_store.reg == sp
1513 effects: cfun->fde.stack_realign = 1
1514 cfa_store.offset = 0
1515 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1516
1517 Rule 17:
1518 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1519 effects: cfa_store.offset += -/+ mode_size(mem)
1520
1521 Rule 18:
1522 (set (mem ({pre_inc, pre_dec} sp)) fp)
1523 constraints: fde->stack_realign == 1
1524 effects: cfa_store.offset = 0
1525 cfa.reg != HARD_FRAME_POINTER_REGNUM
1526
1527 Rule 19:
1528 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1529 constraints: fde->stack_realign == 1
1530 && cfa.offset == 0
1531 && cfa.indirect == 0
1532 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1533 effects: Use DW_CFA_def_cfa_expression to define cfa
1534 cfa.reg == fde->drap_reg */
1535
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    /* First pass: only the MEM destinations (register saves).  */
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  /* Second pass: the register assignments (and, for a SEQUENCE,
	     everything).  */
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  if (REG_P (src))
    {
      /* If SRC currently holds the saved value of some other register,
	 treat this as a reference to that original register.  */
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* A register adjustment must come through cfa_temp,
		     whose tracked value gives the adjustment amount.  */
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cur_trace->cfa_temp.reg = dwf_regno (dest);
		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      /* Remember the register holding the CFA before realignment
		 (the DRAP register) so later saves can recognize it.  */
	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* OFFSET becomes the save slot's displacement from the CFA;
	     for POST_MODIFY the store happens before the adjustment.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     regiser.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Convert OFFSET from base-register-relative to
	       CFA-relative, using whichever tracked base matches.  */
	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cur_cfa->offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2004
2005 /* Record call frame debugging information for INSN, which either sets
2006 SP or FP (adjusting how we calculate the frame address) or saves a
2007 register to the stack. */
2008
static void
dwarf2out_frame_debug (rtx_insn *insn)
{
  rtx note, n, pat;
  bool handled_one = false;

  /* Dispatch each REG_CFA_* / REG_FRAME_RELATED_EXPR note on INSN to
     its dedicated handler.  If no note handled the insn, fall through
     to interpreting the insn pattern itself.  */
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	pat = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	n = XEXP (note, 0);
	/* A null note operand means use the insn pattern itself
	   (the first element if it is a PARALLEL).  */
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	/* A null note operand means use the insn's single set.  */
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;

      case REG_CFA_EXPRESSION:
      case REG_CFA_VAL_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);

	if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
	  dwarf2out_frame_debug_cfa_expression (n);
	else
	  dwarf2out_frame_debug_cfa_val_expression (n);

	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	    /* The restored register is the destination of the set.  */
	    n = XEXP (n, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		if (REG_P (n))
		  fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_WINDOW_SAVE:
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens elsewhere.  */
	handled_one = true;
	break;

      default:
	break;
      }

  if (!handled_one)
    {
      pat = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (pat);

      /* Check again.  A parallel can save and update the same register.
	 We could probably check just once, here, but this is safer than
	 removing the check at the start of the function.  */
      if (clobbers_queued_reg_save (pat))
	dwarf2out_flush_queued_reg_saves ();
    }
}
2128
2129 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2130
2131 static void
2132 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2133 {
2134 size_t i, n_old, n_new, n_max;
2135 dw_cfi_ref cfi;
2136
2137 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2138 add_cfi (new_row->cfa_cfi);
2139 else
2140 {
2141 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2142 if (cfi)
2143 add_cfi (cfi);
2144 }
2145
2146 n_old = vec_safe_length (old_row->reg_save);
2147 n_new = vec_safe_length (new_row->reg_save);
2148 n_max = MAX (n_old, n_new);
2149
2150 for (i = 0; i < n_max; ++i)
2151 {
2152 dw_cfi_ref r_old = NULL, r_new = NULL;
2153
2154 if (i < n_old)
2155 r_old = (*old_row->reg_save)[i];
2156 if (i < n_new)
2157 r_new = (*new_row->reg_save)[i];
2158
2159 if (r_old == r_new)
2160 ;
2161 else if (r_new == NULL)
2162 add_cfi_restore (i);
2163 else if (!cfi_equal_p (r_old, r_new))
2164 add_cfi (r_new);
2165 }
2166 }
2167
2168 /* Examine CFI and return true if a cfi label and set_loc is needed
2169 beforehand. Even when generating CFI assembler instructions, we
2170 still have to add the cfi to the list so that lookup_cfa_1 works
2171 later on. When -g2 and above we even need to force emitting of
2172 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2173 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2174 and so don't use convert_cfa_to_fb_loc_list. */
2175
2176 static bool
2177 cfi_label_required_p (dw_cfi_ref cfi)
2178 {
2179 if (!dwarf2out_do_cfi_asm ())
2180 return true;
2181
2182 if (dwarf_version == 2
2183 && debug_info_level > DINFO_LEVEL_TERSE
2184 && (write_symbols == DWARF2_DEBUG
2185 || write_symbols == VMS_AND_DWARF2_DEBUG))
2186 {
2187 switch (cfi->dw_cfi_opc)
2188 {
2189 case DW_CFA_def_cfa_offset:
2190 case DW_CFA_def_cfa_offset_sf:
2191 case DW_CFA_def_cfa_register:
2192 case DW_CFA_def_cfa:
2193 case DW_CFA_def_cfa_sf:
2194 case DW_CFA_def_cfa_expression:
2195 case DW_CFA_restore_state:
2196 return true;
2197 default:
2198 return false;
2199 }
2200 }
2201 return false;
2202 }
2203
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;
  /* We always start with a function_begin label, so an advance_loc
     from the FDE start address is usable; a set_loc is only forced
     after switching text sections (see below).  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Remember where the CFIs for the cold/alternate section begin.  */
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Advance NEXT past the entire run of consecutive CFI notes
	     (skipping inactive insns), OR-ing together whether any CFI
	     in the run needs a label.  NEXT ends on the first active
	     insn or section switch after the run.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      /* Materialize the label itself in the insn stream so
		 final emits it at the right spot.  */
	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI in the run to the FDE, in order.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2270
/* Forward declaration; defined with the other dump routines.  */
static void dump_cfi_row (FILE *f, dw_cfi_row *row);

/* If START is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  ORIGIN is the branch/call insn
   that leads to START, or NULL for a fallthru edge.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  /* Dump both rows before aborting, so the mismatch is
	     visible in the dump file.  */
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2337
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.  On such edges the landing pad sees
   args_size of zero, so we temporarily adjust the current state,
   record the trace start, then restore the state.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      /* Nothing to adjust; identical to the normal-edge case.  */
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  /* Restore the state we temporarily modified above.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
2373
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled at the CALL_P site below.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge per entry in the jump table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may target any label whose address was
	     taken; conservatively add an edge to each forced label.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	/* A return leaves the function; no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Plain direct jump.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot sequence: recurse on each element; the EH check
	 below is done per-element, so return early.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2456
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN:
   record any frame-related effects and update args_size bookkeeping.  */

static void
scan_insn_after (rtx_insn *insn)
{
  /* Only frame-related insns contribute CFI row changes.  */
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  /* args_size tracking applies to every insn, frame-related or not.  */
  notice_args_size (insn);
}
2466
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  Sets up and tears down the global scanning
   state (CUR_TRACE, CUR_ROW, CUR_CFA, ADD_CFI_INSN) around the walk.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The end-of-trace state starts as a copy of the begin state and is
     mutated in place as we scan.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* THIS_CFA is the scratch CFA that scan_insn_after updates; it is
     committed to CUR_ROW by def_cfa_1 below.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  /* Element 0 of the sequence is the branch/call insn itself.  */
	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Restore the state that held before ELT's effects.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Tear down the global scanning state.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2646
2647 /* Scan the function and create the initial set of CFI notes. */
2648
2649 static void
2650 create_cfi_notes (void)
2651 {
2652 dw_trace_info *ti;
2653
2654 gcc_checking_assert (!queued_reg_saves.exists ());
2655 gcc_checking_assert (!trace_work_list.exists ());
2656
2657 /* Always begin at the entry trace. */
2658 ti = &trace_info[0];
2659 scan_trace (ti);
2660
2661 while (!trace_work_list.is_empty ())
2662 {
2663 ti = trace_work_list.pop ();
2664 scan_trace (ti);
2665 }
2666
2667 queued_reg_saves.release ();
2668 trace_work_list.release ();
2669 }
2670
2671 /* Return the insn before the first NOTE_INSN_CFI after START. */
2672
2673 static rtx_insn *
2674 before_next_cfi_note (rtx_insn *start)
2675 {
2676 rtx_insn *prev = start;
2677 while (start)
2678 {
2679 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2680 return prev;
2681 prev = start;
2682 start = NEXT_INSN (start);
2683 }
2684 gcc_unreachable ();
2685 }
2686
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  Walk backwards so
     ordered_remove doesn't disturb indices not yet visited.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      /* Dump any fixup CFIs that change_cfi_row just emitted.  */
      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new section means a new FDE; args_size restarts at zero.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2813
2814 /* Set up the pseudo-cfg of instruction traces, as described at the
2815 block comment at the top of the file. */
2816
2817 static void
2818 create_pseudo_cfg (void)
2819 {
2820 bool saw_barrier, switch_sections;
2821 dw_trace_info ti;
2822 rtx_insn *insn;
2823 unsigned i;
2824
2825 /* The first trace begins at the start of the function,
2826 and begins with the CIE row state. */
2827 trace_info.create (16);
2828 memset (&ti, 0, sizeof (ti));
2829 ti.head = get_insns ();
2830 ti.beg_row = cie_cfi_row;
2831 ti.cfa_store = cie_cfi_row->cfa;
2832 ti.cfa_temp.reg = INVALID_REGNUM;
2833 trace_info.quick_push (ti);
2834
2835 if (cie_return_save)
2836 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2837
2838 /* Walk all the insns, collecting start of trace locations. */
2839 saw_barrier = false;
2840 switch_sections = false;
2841 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2842 {
2843 if (BARRIER_P (insn))
2844 saw_barrier = true;
2845 else if (NOTE_P (insn)
2846 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2847 {
2848 /* We should have just seen a barrier. */
2849 gcc_assert (saw_barrier);
2850 switch_sections = true;
2851 }
2852 /* Watch out for save_point notes between basic blocks.
2853 In particular, a note after a barrier. Do not record these,
2854 delaying trace creation until the label. */
2855 else if (save_point_p (insn)
2856 && (LABEL_P (insn) || !saw_barrier))
2857 {
2858 memset (&ti, 0, sizeof (ti));
2859 ti.head = insn;
2860 ti.switch_sections = switch_sections;
2861 ti.id = trace_info.length ();
2862 trace_info.safe_push (ti);
2863
2864 saw_barrier = false;
2865 switch_sections = false;
2866 }
2867 }
2868
2869 /* Create the trace index after we've finished building trace_info,
2870 avoiding stale pointer problems due to reallocation. */
2871 trace_index
2872 = new hash_table<trace_info_hasher> (trace_info.length ());
2873 dw_trace_info *tp;
2874 FOR_EACH_VEC_ELT (trace_info, i, tp)
2875 {
2876 dw_trace_info **slot;
2877
2878 if (dump_file)
2879 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2880 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2881 tp->switch_sections ? " (section switch)" : "");
2882
2883 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2884 gcc_assert (*slot == NULL);
2885 *slot = tp;
2886 }
2887 }
2888
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  Updates CUR_ROW (and possibly the
   regs-saved-in-regs table) via reg_save/record_reg_saved_in_reg.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  /* Stored exactly at the incoming stack pointer.  */
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  /* Stored above the incoming stack pointer.  */
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  /* Stored below the incoming stack pointer.  */
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* The RA lives in a register other than the return column;
	 remember the aliasing so later stack saves of that register
	 can be redirected to the return column.  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2950
/* Compute the CIE row state (the frame state on entry to every
   function) once per compilation, populating CIE_CFI_VEC, CIE_CFI_ROW
   and possibly CIE_RETURN_SAVE.  */
static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so the record_* helpers have somewhere to
     accumulate state.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Tear down the scratch state set up above.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
3002
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  /* Partition the function into traces.  */
  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
3041 \f
3042 /* Convert a DWARF call frame info. operation to its string name */
3043
3044 static const char *
3045 dwarf_cfi_name (unsigned int cfi_opc)
3046 {
3047 const char *name = get_DW_CFA_name (cfi_opc);
3048
3049 if (name != NULL)
3050 return name;
3051
3052 return "DW_CFA_<unknown>";
3053 }
3054
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.  FOR_EH
   selects EH-frame vs debug-frame register numbering.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression
      || cfi->dw_cfi_opc == DW_CFA_val_expression)
    {
      /* These two opcodes carry a target register before the
	 expression block; the expression is in operand 2.  */
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    /* DW_CFA_def_cfa_expression: the expression is operand 1.  */
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
3082
/* Similar, but used for .cfi_escape: emits the raw comma-separated
   byte values rather than annotated assembler data.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression
      || cfi->dw_cfi_opc == DW_CFA_val_expression)
    {
      /* These two opcodes carry a target register before the
	 expression block; the expression is in operand 2.  */
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    /* DW_CFA_def_cfa_expression: the expression is operand 1.  */
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
3110
/* Output a Call Frame Information opcode and its operand(s) to the
   assembly file.  FDE is the enclosing frame descriptor (used to track
   the current location label for advance_loc deltas); FOR_EH selects
   EH-frame vs debug-frame register numbering.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* The three "primary" opcodes encode their operand in the low six
     bits of the opcode byte itself.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* Extended opcodes: one opcode byte followed by operands whose
	 encoding depends on the opcode.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_loc variants differ only in delta width (1, 2,
	   4 or 8 bytes); each updates the FDE's current label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  /* Note: def_cfa's offset is NOT data-alignment factored.  */
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These opcodes take a single register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3252
3253 /* Similar, but do it via assembler directives instead. */
3254
void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  /* R and R2 hold DWARF register columns after remapping through
     DWARF2_FRAME_REG_OUT with for_eh == 1, which is the numbering the
     assembler's .cfi_* directives expect.  */
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* The offset is printed unscaled here, unlike the binary encoding
	 path in output_cfi which applies div_data_align.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no native assembler directive for this opcode: when
	 actually assembling, emit the opcode as raw bytes via
	 .cfi_escape; for debugging dumps, print a readable
	 pseudo-directive instead.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      /* Expression bodies are elided ("...") in debugging dumps; only
	 the assembler gets the raw escape bytes.  */
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3372
3373 void
3374 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3375 {
3376 if (dwarf2out_do_cfi_asm ())
3377 output_cfi_directive (asm_out_file, cfi);
3378 }
3379
3380 static void
3381 dump_cfi_row (FILE *f, dw_cfi_row *row)
3382 {
3383 dw_cfi_ref cfi;
3384 unsigned i;
3385
3386 cfi = row->cfa_cfi;
3387 if (!cfi)
3388 {
3389 dw_cfa_location dummy;
3390 memset (&dummy, 0, sizeof (dummy));
3391 dummy.reg = INVALID_REGNUM;
3392 cfi = def_cfa_0 (&dummy, &row->cfa);
3393 }
3394 output_cfi_directive (f, cfi);
3395
3396 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3397 if (cfi)
3398 output_cfi_directive (f, cfi);
3399 }
3400
/* Declared here (rather than in a header) so the definition below has
   a prototype; the function exists to be called from a debugger.  */
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr; convenience entry point for debugger use.  */
void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3408 \f
3409
/* Cached result of dwarf2out_do_cfi_asm, preserved across PCH via the
   GTY marker (dwarf2out_do_frame also consults it as a shortcut).
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3413
3414 /* Decide whether we want to emit frame unwind information for the current
3415 translation unit. */
3416
3417 bool
3418 dwarf2out_do_frame (void)
3419 {
3420 /* We want to emit correct CFA location expressions or lists, so we
3421 have to return true if we're going to output debug info, even if
3422 we're not going to output frame or unwind info. */
3423 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3424 return true;
3425
3426 if (saved_do_cfi_asm > 0)
3427 return true;
3428
3429 if (targetm.debug_unwind_info () == UI_DWARF2)
3430 return true;
3431
3432 if ((flag_unwind_tables || flag_exceptions)
3433 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3434 return true;
3435
3436 return false;
3437 }
3438
3439 /* Decide whether to emit frame unwind via assembler directives. */
3440
bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* The decision is cached in tri-state saved_do_cfi_asm (0 unset,
     >0 yes, <0 no), so the checks below run at most once.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  The 0x70 mask
     selects the relative-addressing portion of the DW_EH_PE encoding;
     only absolute (0) and pc-relative survive.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3477
namespace {

/* Descriptor for the dwarf2 frame RTL pass: accounted under the
   TV_FINAL timevar, with no required/provided properties or todos.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The pass object itself; execution is delegated to
   execute_dwarf2_frame.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

/* Gate: run the pass only when generic dwarf2 CFI generation applies
   to this target and translation unit.  */
bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
3521
3522 rtl_opt_pass *
3523 make_pass_dwarf2_frame (gcc::context *ctxt)
3524 {
3525 return new pass_dwarf2_frame (ctxt);
3526 }
3527
3528 #include "gt-dwarf2cfi.h"