/* Dwarf2 Call Frame Information helper routines.
   Copyright (C) 1992-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "tm_p.h"
#include "emit-rtl.h"
#include "stor-layout.h"
#include "cfgbuild.h"
#include "dwarf2out.h"
#include "dwarf2asm.h"
#include "common/common-target.h"

#include "except.h"   /* expand_builtin_dwarf_sp_column */
#include "expr.h"     /* init_return_column_size */
#include "output.h"   /* asm_out_file */
#include "debug.h"    /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */


/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES 30
\f
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  */
  cfi_vec reg_save;
};

/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
};


/* Since we no longer have a proper CFG, we're going to create a facsimile
   of one on the fly while processing the frame-related insns.

   We create dw_trace_info structures for each extended basic block beginning
   and ending at a "save point".  Save points are labels, barriers, certain
   notes, and of course the beginning and end of the function.

   As we encounter control transfer insns, we propagate the "current"
   row state across the edges to the starts of traces.  When checking is
   enabled, we validate that we propagate the same data from all sources.

   All traces are members of the TRACE_INFO array, in the order in which
   they appear in the instruction stream.

   All save points are present in the TRACE_INDEX hash, mapping the insn
   starting a trace to the dw_trace_info describing the trace.  */

struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};


/* Hashtable helpers.  */

struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

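/* Hash a trace by the INSN_UID of the insn that begins it.  */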
inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

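/* Two traces are equal if they begin at the same insn.  */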
inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}


/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info *> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

static GTY(()) reg_saved_in_data *cie_return_save;

static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
};


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
\f
/* Hook used by __throw.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}

/* MEM is a memory reference for the register size table, each element of
   which has mode MODE.  Initialize column C as a return address column.  */

static void
init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
{
  HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
  HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
  emit_move_insn (adjust_address (mem, mode, offset),
                  gen_int_mode (size, mode));
}

/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};

/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
                              rtx table, machine_mode slotmode,
                              init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  init_state->processed_regno[regno] = true;

  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      if (regmode == VOIDmode)
        return;
      init_state->wrote_return_column = true;
    }

  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
                  gen_int_mode (regsize, slotmode));
}

/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
         with register spans, e.g. when a reg is first processed as a piece of
         a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
        continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
        init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
        {
          for (int si = 0; si < XVECLEN (span, 0); si++)
            {
              rtx reg = XVECEXP (span, 0, si);

              init_one_dwarf_reg_size
                (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
            }
        }
    }

  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}

\f
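/* Look up the trace in TRACE_INDEX whose head is INSN.  */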
static dw_trace_info *
get_trace_info (rtx_insn *insn)
{
  dw_trace_info dummy;
  dummy.head = insn;
  return trace_index->find_with_hash (&dummy, INSN_UID (insn));
}

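/* Return true if INSN is a point at which the current trace should end
   and a new one begin.  */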
static bool
save_point_p (rtx_insn *insn)
{
  /* Labels, except those that are really jump tables.  */
  if (LABEL_P (insn))
    return inside_basic_block_p (insn);

  /* We split traces at the prologue/epilogue notes because those
     are points at which the unwind info is usually stable.  This
     makes it easier to find spots with identical unwind info so
     that we can use remember/restore_state opcodes.  */
  if (NOTE_P (insn))
    switch (NOTE_KIND (insn))
      {
      case NOTE_INSN_PROLOGUE_END:
      case NOTE_INSN_EPILOGUE_BEG:
        return true;
      }

  return false;
}

/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */

static inline HOST_WIDE_INT
div_data_align (HOST_WIDE_INT off)
{
  HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
  gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
  return r;
}

/* Return true if we need a signed version of a given opcode
   (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */

static inline bool
need_data_align_sf_opcode (HOST_WIDE_INT off)
{
  return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
}

/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}

/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  row->cfa.reg = INVALID_REGNUM;

  return row;
}

/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  *dst = *src;
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}

/* Generate a new label for the CFI info to refer to.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  char label[20];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}

/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}

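/* Add a DW_CFA_GNU_args_size CFI recording SIZE as the current size of
   the argument area pushed on the stack.  */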
static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}

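/* Add a DW_CFA_restore or DW_CFA_restore_extended CFI for register
   column REG.  */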
static void
add_cfi_restore (unsigned reg)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  add_cfi (cfi);
}

/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}

/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */

static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
{
  struct dw_loc_descr_node *ptr;
  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;
  cfa->reg = -1;

  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
        {
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          cfa->reg = op - DW_OP_reg0;
          break;
        case DW_OP_regx:
          cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
          break;
        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          cfa->reg = op - DW_OP_breg0;
          cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
          break;
        case DW_OP_bregx:
          cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
          cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
          break;
        case DW_OP_deref:
          cfa->indirect = 1;
          break;
        case DW_OP_plus_uconst:
          cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
          break;
        default:
          gcc_unreachable ();
        }
    }
}

/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}

/* Determine if two dw_cfa_location structures define the same data.  */

bool
cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
{
  return (loc1->reg == loc2->reg
          && loc1->offset == loc2->offset
          && loc1->indirect == loc2->indirect
          && (loc1->indirect == 0
              || loc1->base_offset == loc2->base_offset));
}

/* Determine if two CFI operands are identical.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      return (a->dw_cfi_addr == b->dw_cfi_addr
              || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}

/* Determine if two CFI entries are identical.  */

static bool
cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
{
  enum dwarf_call_frame_info opc;

  /* Make things easier for our callers, including missing operands.  */
  if (a == b)
    return true;
  if (a == NULL || b == NULL)
    return false;

  /* Obviously, the opcodes must match.  */
  opc = a->dw_cfi_opc;
  if (opc != b->dw_cfi_opc)
    return false;

  /* Compare the two operands, re-using the type of the operands as
     already exposed elsewhere.  */
  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
                             &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
          && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
                                &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}

/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
        return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
        r_a = (*a->reg_save)[i];
      if (i < n_b)
        r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
        return false;
    }

  return true;
}

/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
         the CFA register did not change but the offset did.  The data
         factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
         in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
        cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
        cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
           && old_cfa->reg != INVALID_REGNUM
           && !new_cfa->indirect
           && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
         indicating the CFA register has changed to <register> but the
         offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
         indicating the CFA register has changed to <register> with
         the specified offset.  The data factoring for DW_CFA_def_cfa_sf
         happens in output_cfi, or in the assembler via the .cfi_def_cfa
         directive.  */
      if (new_cfa->offset < 0)
        cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
        cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
         calculate the CFA using a full location expression since no
         register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}

/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
                          ? cfi : NULL);

      add_cfi (cfi);
    }
}

/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
        = build_cfa_aligned_loc (&cur_row->cfa, offset,
                                 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      if (need_data_align_sf_opcode (offset))
        cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
        cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
        cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
         DW_CFA_restore, we never expect to see something like that
         in a prologue.  This is more likely to be a bug.  A backend
         can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}

/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
         direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
        delta = -delta;

      cur_cfa->offset += delta;
    }
}

/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
         CFI note and insert there.  Given that the stack changed for the
         args_size change, there *must* be such a note in between here and
         the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}

/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
  return DWARF_FRAME_REGNUM (REGNO (reg));
}

/* Compare X and Y for equivalence.  The inputs may be REGs or PC_RTX.  */

static bool
compare_reg_or_pc (rtx x, rtx y)
{
  if (REG_P (x) && REG_P (y))
    return REGNO (x) == REGNO (y);
  return x == y;
}

/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
        if (dest == NULL)
          cur_trace->regs_saved_in_regs.unordered_remove (i);
        else
          elt->saved_in_reg = dest;
        return;
      }

  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}

/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
        *q = e;
        return;
      }

  queued_reg_saves.safe_push (e);
}

/* Output all the entries in QUEUED_REG_SAVES.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      record_reg_saved_in_reg (q->saved_reg, q->reg);

      if (q->reg == pc_rtx)
        reg = DWARF_FRAME_RETURN_COLUMN;
      else
        reg = dwf_regno (q->reg);
      if (q->saved_reg)
        sreg = dwf_regno (q->saved_reg);
      else
        sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}

/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      if (modified_in_p (q->reg, insn))
        return true;

      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
        if (compare_reg_or_pc (q->reg, rir->orig_reg)
            && modified_in_p (rir->saved_in_reg, insn))
          return true;
    }

  return false;
}

/* What register, if any, is currently saved in REG?  */

static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  return NULL_RTX;
}

/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
        {
          cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
          pat = XEXP (pat, 0);
        }
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note.  */

static void
dwarf2out_frame_debug_cfa_offset (rtx set)
{
  HOST_WIDE_INT offset;
  rtx src, addr, span;
  unsigned int sregno;

  src = XEXP (set, 1);
  addr = XEXP (set, 0);
  gcc_assert (MEM_P (addr));
  addr = XEXP (addr, 0);

  /* As documented, only consider extremely simple addresses.  */
  switch (GET_CODE (addr))
    {
    case REG:
      gcc_assert (dwf_regno (addr) == cur_cfa->reg);
      offset = -cur_cfa->offset;
      break;
    case PLUS:
      gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
      offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
      break;
    default:
      gcc_unreachable ();
    }

  if (src == pc_rtx)
    {
      span = NULL;
      sregno = DWARF_FRAME_RETURN_COLUMN;
    }
  else
    {
      span = targetm.dwarf_register_span (src);
      sregno = dwf_regno (src);
    }

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  if (!span)
    reg_save (sregno, INVALID_REGNUM, offset);
  else
    {
      /* We have a PARALLEL describing where the contents of SRC live.
         Adjust the offset for each piece of the PARALLEL.  */
      HOST_WIDE_INT span_offset = offset;

      gcc_assert (GET_CODE (span) == PARALLEL);

      const int par_len = XVECLEN (span, 0);
      for (int par_index = 0; par_index < par_len; par_index++)
        {
          rtx elem = XVECEXP (span, 0, par_index);
          sregno = dwf_regno (src);
          reg_save (sregno, INVALID_REGNUM, span_offset);
          span_offset += GET_MODE_SIZE (GET_MODE (elem));
        }
    }
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.  */

static void
dwarf2out_frame_debug_cfa_register (rtx set)
{
  rtx src, dest;
  unsigned sregno, dregno;

  src = XEXP (set, 1);
  dest = XEXP (set, 0);

  record_reg_saved_in_reg (dest, src);
  if (src == pc_rtx)
    sregno = DWARF_FRAME_RETURN_COLUMN;
  else
    sregno = dwf_regno (src);

  dregno = dwf_regno (dest);

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  reg_save (sregno, dregno, 0);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
                          GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */

static void
dwarf2out_frame_debug_cfa_restore (rtx reg)
{
  gcc_assert (REG_P (reg));

  rtx span = targetm.dwarf_register_span (reg);
  if (!span)
    {
      unsigned int regno = dwf_regno (reg);
      add_cfi_restore (regno);
      update_row_reg_save (cur_row, regno, NULL);
    }
  else
    {
      /* We have a PARALLEL describing where the contents of REG live.
         Restore the register for each piece of the PARALLEL.  */
      gcc_assert (GET_CODE (span) == PARALLEL);

      const int par_len = XVECLEN (span, 0);
      for (int par_index = 0; par_index < par_len; par_index++)
        {
          reg = XVECEXP (span, 0, par_index);
          gcc_assert (REG_P (reg));
          unsigned int regno = dwf_regno (reg);
          add_cfi_restore (regno);
          update_row_reg_save (cur_row, regno, NULL);
        }
    }
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}

/* Record call frame debugging information for an expression EXPR,
   which either sets SP or FP (adjusting how we calculate the frame
   address) or saves a register to the stack or another register.
   LABEL indicates the address of EXPR.

   This function encodes a state machine mapping rtxes to actions on
   cfa, cfa_store, and cfa_temp.reg.  We describe these rules so
   users need not read the source code.

   The High-Level Picture

   Changes in the register we use to calculate the CFA:  Currently we
   assume that if you copy the CFA register into another register, we
   should take the other one as the new CFA register; this seems to
   work pretty well.  If it's wrong for some target, it's simple
   enough not to set RTX_FRAME_RELATED_P on the insn in question.

   Changes in the register we use for saving registers to the stack:
   This is usually SP, but not always.  Again, we deduce that if you
   copy SP into another register (and SP is not the CFA register),
   then the new register is the one we will be using for register
   saves.  This also seems to work.

   Register saves: There's not much guesswork about this one; if
   RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
   register save, and the register used to calculate the destination
   had better be the one we think we're using for this purpose.
   It's also assumed that a copy from a call-saved register to another
   register is saving that register if RTX_FRAME_RELATED_P is set on
   that instruction.  If the copy is from a call-saved register to
   the *same* register, that means that the register is now the same
   value as in the caller.

   Except: If the register being saved is the CFA register, and the
   offset is nonzero, we are saving the CFA, so we assume we have to
   use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
   the intent is to save the value of SP from the previous frame.

   In addition, if a register has previously been saved to a different
   register,

   Invariants / Summaries of Rules

   cfa          current rule for calculating the CFA.  It usually
                consists of a register and an offset.  This is
                actually stored in *cur_cfa, but abbreviated
                for the purposes of this documentation.
   cfa_store    register used by prologue code to save things to the stack
                cfa_store.offset is the offset from the value of
                cfa_store.reg to the actual CFA
   cfa_temp     register holding an integral value.  cfa_temp.offset
                stores the value, which will be used to adjust the
                stack pointer.  cfa_temp is also used like cfa_store,
                to track stores to the stack via fp or a temp reg.

   Rules  1- 4: Setting a register's value to cfa.reg or an expression
                with cfa.reg as the first operand changes the cfa.reg and its
                cfa.offset.  Rule 1 and 4 also set cfa_temp.reg and
                cfa_temp.offset.

   Rules  6- 9: Set a non-cfa.reg register value to a constant or an
                expression yielding a constant.  This sets cfa_temp.reg
                and cfa_temp.offset.

   Rule 5:      Create a new register cfa_store used to save items to the
                stack.

   Rules 10-14: Save a register to the stack.  Define offset as the
                difference of the original location and cfa_store's
                location (or cfa_temp's location if cfa_temp is used).

   Rules 16-20: If AND operation happens on sp in prologue, we assume
                stack is realigned.  We will use a group of DW_OP_XXX
                expressions to represent the location of the stored
                register instead of CFA+offset.

   The Rules

   "{a,b}" indicates a choice of a xor b.
   "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.

   Rule 1:
   (set <reg1> <reg2>:cfa.reg)
   effects: cfa.reg = <reg1>
            cfa.offset unchanged
            cfa_temp.reg = <reg1>
            cfa_temp.offset = cfa.offset

   Rule 2:
   (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
            {<const_int>,<reg>:cfa_temp.reg}))
   effects: cfa.reg = sp if fp used
            cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
            cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
              if cfa_store.reg==sp

   Rule 3:
   (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
   effects: cfa.reg = fp
            cfa_offset += +/- <const_int>

   Rule 4:
   (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
   constraints: <reg1> != fp
                <reg1> != sp
   effects: cfa.reg = <reg1>
            cfa_temp.reg = <reg1>
            cfa_temp.offset = cfa.offset

   Rule 5:
   (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
   constraints: <reg1> != fp
                <reg1> != sp
   effects: cfa_store.reg = <reg1>
            cfa_store.offset = cfa.offset - cfa_temp.offset

   Rule 6:
   (set <reg> <const_int>)
   effects: cfa_temp.reg = <reg>
            cfa_temp.offset = <const_int>

   Rule 7:
   (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
   effects: cfa_temp.reg = <reg1>
            cfa_temp.offset |= <const_int>

   Rule 8:
   (set <reg> (high <exp>))
   effects: none

   Rule 9:
   (set <reg> (lo_sum <exp> <const_int>))
   effects: cfa_temp.reg = <reg>
            cfa_temp.offset = <const_int>

   Rule 10:
   (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
   effects: cfa_store.offset -= <const_int>
            cfa.offset = cfa_store.offset if cfa.reg == sp
            cfa.reg = sp
            cfa.base_offset = -cfa_store.offset

   Rule 11:
   (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
   effects: cfa_store.offset += -/+ mode_size(mem)
            cfa.offset = cfa_store.offset if cfa.reg == sp
            cfa.reg = sp
            cfa.base_offset = -cfa_store.offset

   Rule 12:
   (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))

        <reg2>)
   effects: cfa.reg = <reg1>
            cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset

   Rule 13:
   (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
   effects: cfa.reg = <reg1>
            cfa.base_offset = -{cfa_store,cfa_temp}.offset

   Rule 14:
   (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
   effects: cfa.reg = <reg1>
            cfa.base_offset = -cfa_temp.offset
            cfa_temp.offset -= mode_size(mem)

   Rule 15:
   (set <reg> {unspec, unspec_volatile})
   effects: target-dependent

   Rule 16:
   (set sp (and: sp <const_int>))
   constraints: cfa_store.reg == sp
   effects: cfun->fde.stack_realign = 1
            cfa_store.offset = 0
            fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp

   Rule 17:
   (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
   effects: cfa_store.offset += -/+ mode_size(mem)

   Rule 18:
   (set (mem ({pre_inc, pre_dec} sp)) fp)
   constraints: fde->stack_realign == 1
   effects: cfa_store.offset = 0
            cfa.reg != HARD_FRAME_POINTER_REGNUM

   Rule 19:
   (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
   constraints: fde->stack_realign == 1
                && cfa.offset == 0
                && cfa.indirect == 0
                && cfa.reg != HARD_FRAME_POINTER_REGNUM
   effects: Use DW_CFA_def_cfa_expression to define cfa
            cfa.reg == fde->drap_reg  */

1513static void
89e25f95 1514dwarf2out_frame_debug_expr (rtx expr)
647a1567
RH
1515{
1516 rtx src, dest, span;
1517 HOST_WIDE_INT offset;
1518 dw_fde_ref fde;
1519
1520 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1521 the PARALLEL independently. The first element is always processed if
1522 it is a SET. This is for backward compatibility. Other elements
1523 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1524 flag is set in them. */
1525 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1526 {
1527 int par_index;
1528 int limit = XVECLEN (expr, 0);
1529 rtx elem;
1530
1531 /* PARALLELs have strict read-modify-write semantics, so we
1532 ought to evaluate every rvalue before changing any lvalue.
1533 It's cumbersome to do that in general, but there's an
1534 easy approximation that is enough for all current users:
1535 handle register saves before register assignments. */
1536 if (GET_CODE (expr) == PARALLEL)
1537 for (par_index = 0; par_index < limit; par_index++)
1538 {
1539 elem = XVECEXP (expr, 0, par_index);
1540 if (GET_CODE (elem) == SET
1541 && MEM_P (SET_DEST (elem))
1542 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
89e25f95 1543 dwarf2out_frame_debug_expr (elem);
647a1567
RH
1544 }
1545
1546 for (par_index = 0; par_index < limit; par_index++)
1547 {
1548 elem = XVECEXP (expr, 0, par_index);
1549 if (GET_CODE (elem) == SET
1550 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1551 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
89e25f95 1552 dwarf2out_frame_debug_expr (elem);
647a1567
RH
1553 }
1554 return;
1555 }
1556
1557 gcc_assert (GET_CODE (expr) == SET);
1558
1559 src = SET_SRC (expr);
1560 dest = SET_DEST (expr);
1561
1562 if (REG_P (src))
1563 {
1564 rtx rsi = reg_saved_in (src);
1565 if (rsi)
1566 src = rsi;
1567 }
1568
a518b996 1569 fde = cfun->fde;
647a1567
RH
1570
1571 switch (GET_CODE (dest))
1572 {
1573 case REG:
1574 switch (GET_CODE (src))
1575 {
1576 /* Setting FP from SP. */
1577 case REG:
9a08d230 1578 if (cur_cfa->reg == dwf_regno (src))
647a1567
RH
1579 {
1580 /* Rule 1 */
1581 /* Update the CFA rule wrt SP or FP. Make sure src is
1582 relative to the current CFA register.
1583
1584 We used to require that dest be either SP or FP, but the
1585 ARM copies SP to a temporary register, and from there to
1586 FP. So we just rely on the backends to only set
1587 RTX_FRAME_RELATED_P on appropriate insns. */
9a08d230
RH
1588 cur_cfa->reg = dwf_regno (dest);
1589 cur_trace->cfa_temp.reg = cur_cfa->reg;
1590 cur_trace->cfa_temp.offset = cur_cfa->offset;
647a1567
RH
1591 }
1592 else
1593 {
1594 /* Saving a register in a register. */
1595 gcc_assert (!fixed_regs [REGNO (dest)]
1596 /* For the SPARC and its register window. */
7263c6d7 1597 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
647a1567
RH
1598
1599 /* After stack is aligned, we can only save SP in FP
1600 if drap register is used. In this case, we have
1601 to restore stack pointer with the CFA value and we
1602 don't generate this DWARF information. */
1603 if (fde
1604 && fde->stack_realign
1605 && REGNO (src) == STACK_POINTER_REGNUM)
1606 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1607 && fde->drap_reg != INVALID_REGNUM
9a08d230 1608 && cur_cfa->reg != dwf_regno (src));
647a1567 1609 else
89e25f95 1610 queue_reg_save (src, dest, 0);
647a1567
RH
1611 }
1612 break;
1613
1614 case PLUS:
1615 case MINUS:
1616 case LO_SUM:
1617 if (dest == stack_pointer_rtx)
1618 {
1619 /* Rule 2 */
1620 /* Adjusting SP. */
1621 switch (GET_CODE (XEXP (src, 1)))
1622 {
1623 case CONST_INT:
1624 offset = INTVAL (XEXP (src, 1));
1625 break;
1626 case REG:
43215a89
RH
1627 gcc_assert (dwf_regno (XEXP (src, 1))
1628 == cur_trace->cfa_temp.reg);
1629 offset = cur_trace->cfa_temp.offset;
647a1567
RH
1630 break;
1631 default:
1632 gcc_unreachable ();
1633 }
1634
1635 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1636 {
1637 /* Restoring SP from FP in the epilogue. */
9a08d230
RH
1638 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1639 cur_cfa->reg = dw_stack_pointer_regnum;
647a1567
RH
1640 }
1641 else if (GET_CODE (src) == LO_SUM)
1642 /* Assume we've set the source reg of the LO_SUM from sp. */
1643 ;
1644 else
1645 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1646
1647 if (GET_CODE (src) != MINUS)
1648 offset = -offset;
9a08d230
RH
1649 if (cur_cfa->reg == dw_stack_pointer_regnum)
1650 cur_cfa->offset += offset;
43215a89
RH
1651 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1652 cur_trace->cfa_store.offset += offset;
647a1567
RH
1653 }
1654 else if (dest == hard_frame_pointer_rtx)
1655 {
1656 /* Rule 3 */
1657 /* Either setting the FP from an offset of the SP,
1658 or adjusting the FP */
1659 gcc_assert (frame_pointer_needed);
1660
1661 gcc_assert (REG_P (XEXP (src, 0))
9a08d230 1662 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
647a1567
RH
1663 && CONST_INT_P (XEXP (src, 1)));
1664 offset = INTVAL (XEXP (src, 1));
1665 if (GET_CODE (src) != MINUS)
1666 offset = -offset;
9a08d230
RH
1667 cur_cfa->offset += offset;
1668 cur_cfa->reg = dw_frame_pointer_regnum;
647a1567
RH
1669 }
1670 else
1671 {
1672 gcc_assert (GET_CODE (src) != MINUS);
1673
1674 /* Rule 4 */
1675 if (REG_P (XEXP (src, 0))
9a08d230 1676 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
647a1567
RH
1677 && CONST_INT_P (XEXP (src, 1)))
1678 {
1679 /* Setting a temporary CFA register that will be copied
1680 into the FP later on. */
1681 offset = - INTVAL (XEXP (src, 1));
9a08d230
RH
1682 cur_cfa->offset += offset;
1683 cur_cfa->reg = dwf_regno (dest);
647a1567 1684 /* Or used to save regs to the stack. */
9a08d230
RH
1685 cur_trace->cfa_temp.reg = cur_cfa->reg;
1686 cur_trace->cfa_temp.offset = cur_cfa->offset;
647a1567
RH
1687 }
1688
1689 /* Rule 5 */
1690 else if (REG_P (XEXP (src, 0))
43215a89 1691 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
647a1567
RH
1692 && XEXP (src, 1) == stack_pointer_rtx)
1693 {
1694 /* Setting a scratch register that we will use instead
1695 of SP for saving registers to the stack. */
9a08d230 1696 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
43215a89
RH
1697 cur_trace->cfa_store.reg = dwf_regno (dest);
1698 cur_trace->cfa_store.offset
9a08d230 1699 = cur_cfa->offset - cur_trace->cfa_temp.offset;
647a1567
RH
1700 }
1701
1702 /* Rule 9 */
1703 else if (GET_CODE (src) == LO_SUM
1704 && CONST_INT_P (XEXP (src, 1)))
1705 {
43215a89
RH
1706 cur_trace->cfa_temp.reg = dwf_regno (dest);
1707 cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
647a1567
RH
1708 }
1709 else
1710 gcc_unreachable ();
1711 }
1712 break;
1713
1714 /* Rule 6 */
1715 case CONST_INT:
43215a89
RH
1716 cur_trace->cfa_temp.reg = dwf_regno (dest);
1717 cur_trace->cfa_temp.offset = INTVAL (src);
647a1567
RH
1718 break;
1719
1720 /* Rule 7 */
1721 case IOR:
1722 gcc_assert (REG_P (XEXP (src, 0))
43215a89 1723 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
647a1567
RH
1724 && CONST_INT_P (XEXP (src, 1)));
1725
43215a89
RH
1726 cur_trace->cfa_temp.reg = dwf_regno (dest);
1727 cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
647a1567
RH
1728 break;
1729
1730 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1731 which will fill in all of the bits. */
1732 /* Rule 8 */
1733 case HIGH:
1734 break;
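	 /* A hypothetical sketch of the HIGH/LO_SUM idiom covered by rules
	    8 and 9, as used by targets that build a constant in two steps
	    (Rt is a placeholder register):
	      (set (reg Rt) (high (const_int 0x12345678)))
	      (set (reg Rt) (lo_sum (reg Rt) (const_int 0x12345678)))
	    The HIGH is ignored; the LO_SUM records Rt and the constant in
	    cur_trace->cfa_temp.  */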
1735
1736 /* Rule 15 */
1737 case UNSPEC:
1738 case UNSPEC_VOLATILE:
89e25f95
BS
1739 /* All unspecs should be represented by REG_CFA_* notes. */
1740 gcc_unreachable ();
647a1567
RH
1741 return;
1742
1743 /* Rule 16 */
1744 case AND:
 1745 /* If this AND operation happens on the stack pointer in the prologue,
1746 we assume the stack is realigned and we extract the
1747 alignment. */
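	 /* A hypothetical example of the pattern handled here, realigning
	    the stack to a 16-byte boundary in the prologue:
	      (set (reg sp) (and (reg sp) (const_int -16)))
	    INTVAL (XEXP (src, 1)), i.e. -16, becomes fde->stack_realignment.  */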
1748 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1749 {
1750 /* We interpret reg_save differently with stack_realign set.
1751 Thus we must flush whatever we have queued first. */
1752 dwarf2out_flush_queued_reg_saves ();
1753
43215a89
RH
1754 gcc_assert (cur_trace->cfa_store.reg
1755 == dwf_regno (XEXP (src, 0)));
647a1567
RH
1756 fde->stack_realign = 1;
1757 fde->stack_realignment = INTVAL (XEXP (src, 1));
43215a89 1758 cur_trace->cfa_store.offset = 0;
647a1567 1759
9a08d230
RH
1760 if (cur_cfa->reg != dw_stack_pointer_regnum
1761 && cur_cfa->reg != dw_frame_pointer_regnum)
1762 fde->drap_reg = cur_cfa->reg;
647a1567
RH
1763 }
1764 return;
1765
1766 default:
1767 gcc_unreachable ();
1768 }
647a1567
RH
1769 break;
1770
1771 case MEM:
1772
1773 /* Saving a register to the stack. Make sure dest is relative to the
1774 CFA register. */
1775 switch (GET_CODE (XEXP (dest, 0)))
1776 {
1777 /* Rule 10 */
1778 /* With a push. */
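	 /* A hypothetical example of the push handled here, on a target with
	    a downward-growing stack and 8-byte words (Rs is a placeholder):
	      (set (mem (pre_modify (reg sp) (plus (reg sp) (const_int -8))))
	           (reg Rs))
	    so cfa_store.offset grows by 8 and Rs is queued at that slot.  */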
1779 case PRE_MODIFY:
1780 case POST_MODIFY:
1781 /* We can't handle variable size modifications. */
1782 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1783 == CONST_INT);
1784 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1785
1786 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
43215a89 1787 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
647a1567 1788
43215a89 1789 cur_trace->cfa_store.offset += offset;
9a08d230
RH
1790 if (cur_cfa->reg == dw_stack_pointer_regnum)
1791 cur_cfa->offset = cur_trace->cfa_store.offset;
647a1567
RH
1792
1793 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
43215a89 1794 offset -= cur_trace->cfa_store.offset;
647a1567 1795 else
43215a89 1796 offset = -cur_trace->cfa_store.offset;
647a1567
RH
1797 break;
1798
1799 /* Rule 11 */
1800 case PRE_INC:
1801 case PRE_DEC:
1802 case POST_DEC:
1803 offset = GET_MODE_SIZE (GET_MODE (dest));
1804 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1805 offset = -offset;
1806
1807 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1808 == STACK_POINTER_REGNUM)
43215a89 1809 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
647a1567 1810
43215a89 1811 cur_trace->cfa_store.offset += offset;
647a1567
RH
1812
1813 /* Rule 18: If stack is aligned, we will use FP as a
1814 reference to represent the address of the stored
 1815 register. */
1816 if (fde
1817 && fde->stack_realign
7b4d5595
L
1818 && REG_P (src)
1819 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
647a1567 1820 {
9a08d230 1821 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
43215a89 1822 cur_trace->cfa_store.offset = 0;
647a1567
RH
1823 }
1824
9a08d230
RH
1825 if (cur_cfa->reg == dw_stack_pointer_regnum)
1826 cur_cfa->offset = cur_trace->cfa_store.offset;
647a1567
RH
1827
1828 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
43215a89 1829 offset += -cur_trace->cfa_store.offset;
647a1567 1830 else
43215a89 1831 offset = -cur_trace->cfa_store.offset;
647a1567
RH
1832 break;
1833
1834 /* Rule 12 */
1835 /* With an offset. */
1836 case PLUS:
1837 case MINUS:
1838 case LO_SUM:
1839 {
7263c6d7 1840 unsigned int regno;
647a1567
RH
1841
1842 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1843 && REG_P (XEXP (XEXP (dest, 0), 0)));
1844 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1845 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1846 offset = -offset;
1847
7263c6d7 1848 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
647a1567 1849
9a08d230
RH
1850 if (cur_cfa->reg == regno)
1851 offset -= cur_cfa->offset;
43215a89
RH
1852 else if (cur_trace->cfa_store.reg == regno)
1853 offset -= cur_trace->cfa_store.offset;
647a1567
RH
1854 else
1855 {
43215a89
RH
1856 gcc_assert (cur_trace->cfa_temp.reg == regno);
1857 offset -= cur_trace->cfa_temp.offset;
647a1567
RH
1858 }
1859 }
1860 break;
1861
1862 /* Rule 13 */
1863 /* Without an offset. */
1864 case REG:
1865 {
7263c6d7 1866 unsigned int regno = dwf_regno (XEXP (dest, 0));
647a1567 1867
9a08d230
RH
1868 if (cur_cfa->reg == regno)
1869 offset = -cur_cfa->offset;
43215a89
RH
1870 else if (cur_trace->cfa_store.reg == regno)
1871 offset = -cur_trace->cfa_store.offset;
647a1567
RH
1872 else
1873 {
43215a89
RH
1874 gcc_assert (cur_trace->cfa_temp.reg == regno);
1875 offset = -cur_trace->cfa_temp.offset;
647a1567
RH
1876 }
1877 }
1878 break;
1879
1880 /* Rule 14 */
1881 case POST_INC:
43215a89
RH
1882 gcc_assert (cur_trace->cfa_temp.reg
1883 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1884 offset = -cur_trace->cfa_temp.offset;
1885 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
647a1567
RH
1886 break;
1887
1888 default:
1889 gcc_unreachable ();
1890 }
1891
a8e5c0e7
RH
1892 /* Rule 17 */
 1893 /* If the source operand of this MEM operation is itself a MEM,
 1894 we only care how much the stack grew. */
1895 if (MEM_P (src))
647a1567
RH
1896 break;
1897
a8e5c0e7
RH
1898 if (REG_P (src)
1899 && REGNO (src) != STACK_POINTER_REGNUM
647a1567 1900 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
9a08d230 1901 && dwf_regno (src) == cur_cfa->reg)
647a1567
RH
1902 {
1903 /* We're storing the current CFA reg into the stack. */
1904
9a08d230 1905 if (cur_cfa->offset == 0)
647a1567
RH
1906 {
1907 /* Rule 19 */
1908 /* If stack is aligned, putting CFA reg into stack means
1909 we can no longer use reg + offset to represent CFA.
1910 Here we use DW_CFA_def_cfa_expression instead. The
 1911 result of this expression equals the original CFA
1912 value. */
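		  /* In dw_cfa_location terms, the assignments below switch to
		     an indirect CFA: roughly CFA = *(reg + base_offset) + offset,
		     a form that only DW_CFA_def_cfa_expression can describe.  */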
1913 if (fde
1914 && fde->stack_realign
9a08d230
RH
1915 && cur_cfa->indirect == 0
1916 && cur_cfa->reg != dw_frame_pointer_regnum)
647a1567 1917 {
9a08d230 1918 gcc_assert (fde->drap_reg == cur_cfa->reg);
647a1567 1919
9a08d230
RH
1920 cur_cfa->indirect = 1;
1921 cur_cfa->reg = dw_frame_pointer_regnum;
1922 cur_cfa->base_offset = offset;
1923 cur_cfa->offset = 0;
647a1567
RH
1924
1925 fde->drap_reg_saved = 1;
647a1567
RH
1926 break;
1927 }
1928
1929 /* If the source register is exactly the CFA, assume
1930 we're saving SP like any other register; this happens
1931 on the ARM. */
89e25f95 1932 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
647a1567
RH
1933 break;
1934 }
1935 else
1936 {
1937 /* Otherwise, we'll need to look in the stack to
1938 calculate the CFA. */
1939 rtx x = XEXP (dest, 0);
1940
1941 if (!REG_P (x))
1942 x = XEXP (x, 0);
1943 gcc_assert (REG_P (x));
1944
9a08d230
RH
1945 cur_cfa->reg = dwf_regno (x);
1946 cur_cfa->base_offset = offset;
1947 cur_cfa->indirect = 1;
647a1567
RH
1948 break;
1949 }
1950 }
1951
a8e5c0e7
RH
1952 if (REG_P (src))
1953 span = targetm.dwarf_register_span (src);
a4d47cac
EB
1954 else
1955 span = NULL;
1956
a8e5c0e7
RH
1957 if (!span)
1958 queue_reg_save (src, NULL_RTX, offset);
1959 else
1960 {
1961 /* We have a PARALLEL describing where the contents of SRC live.
1962 Queue register saves for each piece of the PARALLEL. */
a8e5c0e7 1963 HOST_WIDE_INT span_offset = offset;
647a1567 1964
a8e5c0e7 1965 gcc_assert (GET_CODE (span) == PARALLEL);
647a1567 1966
a4d47cac
EB
1967 const int par_len = XVECLEN (span, 0);
1968 for (int par_index = 0; par_index < par_len; par_index++)
a8e5c0e7
RH
1969 {
1970 rtx elem = XVECEXP (span, 0, par_index);
1971 queue_reg_save (elem, NULL_RTX, span_offset);
1972 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1973 }
1974 }
647a1567
RH
1975 break;
1976
1977 default:
1978 gcc_unreachable ();
1979 }
1980}
1981
9a08d230
RH
1982/* Record call frame debugging information for INSN, which either sets
1983 SP or FP (adjusting how we calculate the frame address) or saves a
1984 register to the stack. */
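/* This is reached from scan_insn_after for each RTX_FRAME_RELATED_P insn.
   If a REG_CFA_* note is attached, that note is used and the raw pattern is
   ignored; otherwise (or for REG_FRAME_RELATED_EXPR) the pattern itself is
   interpreted by dwarf2out_frame_debug_expr.  */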
647a1567 1985
7644b3c7 1986static void
e8a54173 1987dwarf2out_frame_debug (rtx_insn *insn)
647a1567 1988{
e8a54173 1989 rtx note, n, pat;
647a1567 1990 bool handled_one = false;
647a1567
RH
1991
1992 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1993 switch (REG_NOTE_KIND (note))
1994 {
1995 case REG_FRAME_RELATED_EXPR:
e8a54173 1996 pat = XEXP (note, 0);
647a1567
RH
1997 goto do_frame_expr;
1998
1999 case REG_CFA_DEF_CFA:
89e25f95 2000 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
647a1567
RH
2001 handled_one = true;
2002 break;
2003
2004 case REG_CFA_ADJUST_CFA:
2005 n = XEXP (note, 0);
2006 if (n == NULL)
2007 {
2008 n = PATTERN (insn);
2009 if (GET_CODE (n) == PARALLEL)
2010 n = XVECEXP (n, 0, 0);
2011 }
89e25f95 2012 dwarf2out_frame_debug_adjust_cfa (n);
647a1567
RH
2013 handled_one = true;
2014 break;
2015
2016 case REG_CFA_OFFSET:
2017 n = XEXP (note, 0);
2018 if (n == NULL)
2019 n = single_set (insn);
89e25f95 2020 dwarf2out_frame_debug_cfa_offset (n);
647a1567
RH
2021 handled_one = true;
2022 break;
2023
2024 case REG_CFA_REGISTER:
2025 n = XEXP (note, 0);
2026 if (n == NULL)
2027 {
2028 n = PATTERN (insn);
2029 if (GET_CODE (n) == PARALLEL)
2030 n = XVECEXP (n, 0, 0);
2031 }
89e25f95 2032 dwarf2out_frame_debug_cfa_register (n);
647a1567
RH
2033 handled_one = true;
2034 break;
2035
2036 case REG_CFA_EXPRESSION:
2037 n = XEXP (note, 0);
2038 if (n == NULL)
2039 n = single_set (insn);
89e25f95 2040 dwarf2out_frame_debug_cfa_expression (n);
647a1567
RH
2041 handled_one = true;
2042 break;
2043
2044 case REG_CFA_RESTORE:
2045 n = XEXP (note, 0);
2046 if (n == NULL)
2047 {
2048 n = PATTERN (insn);
2049 if (GET_CODE (n) == PARALLEL)
2050 n = XVECEXP (n, 0, 0);
2051 n = XEXP (n, 0);
2052 }
89e25f95 2053 dwarf2out_frame_debug_cfa_restore (n);
647a1567
RH
2054 handled_one = true;
2055 break;
2056
2057 case REG_CFA_SET_VDRAP:
2058 n = XEXP (note, 0);
2059 if (REG_P (n))
2060 {
a518b996 2061 dw_fde_ref fde = cfun->fde;
647a1567
RH
2062 if (fde)
2063 {
2064 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2065 if (REG_P (n))
7263c6d7 2066 fde->vdrap_reg = dwf_regno (n);
647a1567
RH
2067 }
2068 }
2069 handled_one = true;
2070 break;
2071
2072 case REG_CFA_WINDOW_SAVE:
89e25f95 2073 dwarf2out_frame_debug_cfa_window_save ();
647a1567
RH
2074 handled_one = true;
2075 break;
2076
2077 case REG_CFA_FLUSH_QUEUE:
67d7405e 2078 /* The actual flush happens elsewhere. */
647a1567
RH
2079 handled_one = true;
2080 break;
2081
2082 default:
2083 break;
2084 }
2085
67d7405e 2086 if (!handled_one)
647a1567 2087 {
e8a54173 2088 pat = PATTERN (insn);
647a1567 2089 do_frame_expr:
e8a54173 2090 dwarf2out_frame_debug_expr (pat);
647a1567
RH
2091
2092 /* Check again. A parallel can save and update the same register.
2093 We could probably check just once, here, but this is safer than
2094 removing the check at the start of the function. */
e8a54173 2095 if (clobbers_queued_reg_save (pat))
67d7405e 2096 dwarf2out_flush_queued_reg_saves ();
647a1567 2097 }
647a1567
RH
2098}
2099
57e16c96
RH
2100/* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
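/* Only the differences are emitted: a new CFA opcode when the CFA rule
   changed, a DW_CFA_restore for a register column that is no longer saved,
   and the new save CFI for a column whose rule changed.  */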
2101
2102static void
ce363ef2 2103change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
57e16c96
RH
2104{
2105 size_t i, n_old, n_new, n_max;
2106 dw_cfi_ref cfi;
2107
2108 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2109 add_cfi (new_row->cfa_cfi);
2110 else
2111 {
2112 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2113 if (cfi)
2114 add_cfi (cfi);
2115 }
2116
9771b263
DN
2117 n_old = vec_safe_length (old_row->reg_save);
2118 n_new = vec_safe_length (new_row->reg_save);
57e16c96
RH
2119 n_max = MAX (n_old, n_new);
2120
2121 for (i = 0; i < n_max; ++i)
2122 {
2123 dw_cfi_ref r_old = NULL, r_new = NULL;
2124
2125 if (i < n_old)
9771b263 2126 r_old = (*old_row->reg_save)[i];
57e16c96 2127 if (i < n_new)
9771b263 2128 r_new = (*new_row->reg_save)[i];
57e16c96
RH
2129
2130 if (r_old == r_new)
2131 ;
2132 else if (r_new == NULL)
2133 add_cfi_restore (i);
2134 else if (!cfi_equal_p (r_old, r_new))
2135 add_cfi (r_new);
2136 }
2137}
2138
89e25f95
BS
2139/* Examine CFI and return true if a cfi label and set_loc are needed
2140 beforehand. Even when generating CFI assembler instructions, we
4a8ee122 2141 still have to add the cfi to the list so that lookup_cfa_1 works
89e25f95
BS
2142 later on. At -g2 and above we even need to force emission of
2143 CFI labels and add a DW_CFA_set_loc to the list for convert_cfa_to_fb_loc_list
2144 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2145 and so don't use convert_cfa_to_fb_loc_list. */
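/* For example, when CFI is emitted via assembler directives and we are
   producing DWARF 2 at -g2 or above, only the CFA-defining opcodes and
   DW_CFA_restore_state in the switch below force a label; a plain
   DW_CFA_offset for a callee-saved register does not.  */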
2146
2147static bool
2148cfi_label_required_p (dw_cfi_ref cfi)
2149{
2150 if (!dwarf2out_do_cfi_asm ())
2151 return true;
2152
2153 if (dwarf_version == 2
2154 && debug_info_level > DINFO_LEVEL_TERSE
2155 && (write_symbols == DWARF2_DEBUG
2156 || write_symbols == VMS_AND_DWARF2_DEBUG))
2157 {
2158 switch (cfi->dw_cfi_opc)
2159 {
2160 case DW_CFA_def_cfa_offset:
2161 case DW_CFA_def_cfa_offset_sf:
2162 case DW_CFA_def_cfa_register:
2163 case DW_CFA_def_cfa:
2164 case DW_CFA_def_cfa_sf:
2165 case DW_CFA_def_cfa_expression:
2166 case DW_CFA_restore_state:
2167 return true;
2168 default:
2169 return false;
2170 }
2171 }
2172 return false;
2173}
2174
2175/* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2176 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2177 necessary. */
2178static void
2179add_cfis_to_fde (void)
2180{
a518b996 2181 dw_fde_ref fde = cfun->fde;
f65c531e 2182 rtx_insn *insn, *next;
89e25f95
BS
2183 /* We always start with a function_begin label. */
2184 bool first = false;
2185
2186 for (insn = get_insns (); insn; insn = next)
2187 {
2188 next = NEXT_INSN (insn);
2189
2190 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2191 {
9771b263 2192 fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
89e25f95
BS
2193 /* Don't attempt to advance_loc4 between labels
2194 in different sections. */
2195 first = true;
2196 }
2197
2198 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2199 {
2200 bool required = cfi_label_required_p (NOTE_CFI (insn));
b84dad8e
JJ
2201 while (next)
2202 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2203 {
2204 required |= cfi_label_required_p (NOTE_CFI (next));
2205 next = NEXT_INSN (next);
2206 }
2207 else if (active_insn_p (next)
2208 || (NOTE_P (next) && (NOTE_KIND (next)
2209 == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2210 break;
2211 else
89e25f95 2212 next = NEXT_INSN (next);
89e25f95
BS
2213 if (required)
2214 {
2215 int num = dwarf2out_cfi_label_num;
2216 const char *label = dwarf2out_cfi_label ();
2217 dw_cfi_ref xcfi;
89e25f95
BS
2218
2219 /* Set the location counter to the new label. */
2220 xcfi = new_cfi ();
2221 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2222 : DW_CFA_advance_loc4);
2223 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
9771b263 2224 vec_safe_push (fde->dw_fde_cfi, xcfi);
89e25f95 2225
e67d1102 2226 rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
89e25f95
BS
2227 NOTE_LABEL_NUMBER (tmp) = num;
2228 }
2229
2230 do
2231 {
b84dad8e 2232 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
9771b263 2233 vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
89e25f95
BS
2234 insn = NEXT_INSN (insn);
2235 }
2236 while (insn != next);
2237 first = false;
2238 }
2239 }
2240}
2241
829bdd4b
RH
2242/* If START is the start of a trace, then initialize the state of that
2243 trace from CUR_TRACE and CUR_ROW. */
43215a89 2244
7644b3c7 2245static void
7583d99a 2246maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
89e25f95 2247{
829bdd4b 2248 dw_trace_info *ti;
9a08d230 2249 HOST_WIDE_INT args_size;
829bdd4b
RH
2250
2251 ti = get_trace_info (start);
2252 gcc_assert (ti != NULL);
647a1567 2253
829bdd4b 2254 if (dump_file)
647a1567 2255 {
829bdd4b 2256 fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
200e10dc 2257 cur_trace->id, ti->id,
829bdd4b
RH
2258 (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
2259 (origin ? INSN_UID (origin) : 0));
2260 }
bc5612ed 2261
9a08d230 2262 args_size = cur_trace->end_true_args_size;
829bdd4b
RH
2263 if (ti->beg_row == NULL)
2264 {
2265 /* This is the first time we've encountered this trace. Propagate
2266 state across the edge and push the trace onto the work list. */
2267 ti->beg_row = copy_cfi_row (cur_row);
9a08d230 2268 ti->beg_true_args_size = args_size;
2f23f97a 2269
829bdd4b
RH
2270 ti->cfa_store = cur_trace->cfa_store;
2271 ti->cfa_temp = cur_trace->cfa_temp;
9771b263 2272 ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();
829bdd4b 2273
9771b263 2274 trace_work_list.safe_push (ti);
829bdd4b
RH
2275
2276 if (dump_file)
200e10dc 2277 fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
829bdd4b
RH
2278 }
2279 else
2280 {
9a08d230 2281
829bdd4b
RH
2282 /* We ought to have the same state incoming to a given trace no
2283 matter how we arrive at the trace. Anything else means we've
2284 got some kind of optimization error. */
2285 gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));
9a08d230
RH
2286
2287 /* The args_size is allowed to conflict if it isn't actually used. */
2288 if (ti->beg_true_args_size != args_size)
2289 ti->args_size_undefined = true;
2290 }
2291}
2292
2293/* Similarly, but handle the args_size and CFA reset across EH
2294 and non-local goto edges. */
2295
2296static void
7583d99a 2297maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
9a08d230
RH
2298{
2299 HOST_WIDE_INT save_args_size, delta;
2300 dw_cfa_location save_cfa;
2301
2302 save_args_size = cur_trace->end_true_args_size;
2303 if (save_args_size == 0)
2304 {
2305 maybe_record_trace_start (start, origin);
2306 return;
2307 }
2308
2309 delta = -save_args_size;
2310 cur_trace->end_true_args_size = 0;
2311
2312 save_cfa = cur_row->cfa;
2313 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2314 {
 2315 /* Convert a change in args_size (always positive in the
2316 direction of stack growth) to a change in stack pointer. */
581edfa3
TS
2317 if (!STACK_GROWS_DOWNWARD)
2318 delta = -delta;
2319
9a08d230 2320 cur_row->cfa.offset += delta;
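	 /* A worked example with hypothetical numbers: with 16 bytes of
	    outgoing arguments outstanding, delta is -16, so an SP-based
	    CFA offset shrinks by 16 -- those bytes are no longer on the
	    stack when the abnormal edge is taken.  */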
829bdd4b 2321 }
9a08d230
RH
2322
2323 maybe_record_trace_start (start, origin);
2324
2325 cur_trace->end_true_args_size = save_args_size;
2326 cur_row->cfa = save_cfa;
829bdd4b 2327}
45fba6d1 2328
829bdd4b
RH
2329/* Propagate CUR_TRACE state to the destinations implied by INSN. */
2330/* ??? Sadly, this is in large part a duplicate of make_edges. */
2331
2332static void
7583d99a 2333create_trace_edges (rtx_insn *insn)
829bdd4b 2334{
ca486330 2335 rtx tmp;
829bdd4b
RH
2336 int i, n;
2337
2338 if (JUMP_P (insn))
2339 {
8942ee0f
DM
2340 rtx_jump_table_data *table;
2341
829bdd4b 2342 if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
9a08d230
RH
2343 return;
2344
8942ee0f 2345 if (tablejump_p (insn, NULL, &table))
bc5612ed 2346 {
95c43227 2347 rtvec vec = table->get_labels ();
829bdd4b
RH
2348
2349 n = GET_NUM_ELEM (vec);
2350 for (i = 0; i < n; ++i)
2351 {
7583d99a 2352 rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
9a08d230 2353 maybe_record_trace_start (lab, insn);
829bdd4b
RH
2354 }
2355 }
2356 else if (computed_jump_p (insn))
bc5612ed 2357 {
6f7eba34
TS
2358 rtx_insn *temp;
2359 unsigned int i;
2360 FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
2361 maybe_record_trace_start (temp, insn);
829bdd4b
RH
2362 }
2363 else if (returnjump_p (insn))
2364 ;
2365 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
2366 {
2367 n = ASM_OPERANDS_LABEL_LENGTH (tmp);
2368 for (i = 0; i < n; ++i)
bc5612ed 2369 {
7583d99a
DM
2370 rtx_insn *lab =
2371 as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
9a08d230 2372 maybe_record_trace_start (lab, insn);
829bdd4b
RH
2373 }
2374 }
2375 else
2376 {
7583d99a 2377 rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
829bdd4b 2378 gcc_assert (lab != NULL);
9a08d230 2379 maybe_record_trace_start (lab, insn);
829bdd4b
RH
2380 }
2381 }
2382 else if (CALL_P (insn))
2383 {
2384 /* Sibling calls don't have edges inside this function. */
2385 if (SIBLING_CALL_P (insn))
2386 return;
965b2557 2387
829bdd4b
RH
2388 /* Process non-local goto edges. */
2389 if (can_nonlocal_goto (insn))
b5241a5a 2390 for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
2382940b
DM
2391 lab;
2392 lab = lab->next ())
b5241a5a 2393 maybe_record_trace_start_abnormal (lab->insn (), insn);
829bdd4b 2394 }
292d1dfb 2395 else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
3382318a 2396 {
292d1dfb 2397 int i, n = seq->len ();
3382318a 2398 for (i = 0; i < n; ++i)
292d1dfb 2399 create_trace_edges (seq->insn (i));
3382318a
RH
2400 return;
2401 }
965b2557 2402
829bdd4b
RH
2403 /* Process EH edges. */
2404 if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
2405 {
2406 eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
2407 if (lp)
9a08d230 2408 maybe_record_trace_start_abnormal (lp->landing_pad, insn);
829bdd4b
RH
2409 }
2410}
57e16c96 2411
eebc8f37
RH
2412/* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2413
2414static void
e8a54173 2415scan_insn_after (rtx_insn *insn)
eebc8f37
RH
2416{
2417 if (RTX_FRAME_RELATED_P (insn))
2418 dwarf2out_frame_debug (insn);
2419 notice_args_size (insn);
2420}
2421
829bdd4b
RH
2422/* Scan the trace beginning at INSN and create the CFI notes for the
2423 instructions therein. */
2424
2425static void
2426scan_trace (dw_trace_info *trace)
2427{
7583d99a 2428 rtx_insn *prev, *insn = trace->head;
9a08d230 2429 dw_cfa_location this_cfa;
829bdd4b
RH
2430
2431 if (dump_file)
2432 fprintf (dump_file, "Processing trace %u : start at %s %d\n",
200e10dc 2433 trace->id, rtx_name[(int) GET_CODE (insn)],
829bdd4b
RH
2434 INSN_UID (insn));
2435
2436 trace->end_row = copy_cfi_row (trace->beg_row);
9a08d230 2437 trace->end_true_args_size = trace->beg_true_args_size;
829bdd4b
RH
2438
2439 cur_trace = trace;
2440 cur_row = trace->end_row;
9a08d230
RH
2441
2442 this_cfa = cur_row->cfa;
2443 cur_cfa = &this_cfa;
829bdd4b 2444
eebc8f37
RH
2445 for (prev = insn, insn = NEXT_INSN (insn);
2446 insn;
2447 prev = insn, insn = NEXT_INSN (insn))
829bdd4b 2448 {
7583d99a 2449 rtx_insn *control;
eebc8f37 2450
9a08d230 2451 /* Do everything that happens "before" the insn. */
eebc8f37 2452 add_cfi_insn = prev;
829bdd4b
RH
2453
2454 /* Notice the end of a trace. */
9a08d230
RH
2455 if (BARRIER_P (insn))
2456 {
2457 /* Don't bother saving the unneeded queued registers at all. */
9771b263 2458 queued_reg_saves.truncate (0);
9a08d230
RH
2459 break;
2460 }
2461 if (save_point_p (insn))
829bdd4b 2462 {
829bdd4b 2463 /* Propagate across fallthru edges. */
9a08d230
RH
2464 dwarf2out_flush_queued_reg_saves ();
2465 maybe_record_trace_start (insn, NULL);
829bdd4b 2466 break;
bc5612ed
BS
2467 }
2468
829bdd4b 2469 if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
bc5612ed
BS
2470 continue;
2471
eebc8f37
RH
2472 /* Handle all changes to the row state. Sequences require special
2473 handling for the positioning of the notes. */
292d1dfb 2474 if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
bc5612ed 2475 {
e8a54173 2476 rtx_insn *elt;
292d1dfb 2477 int i, n = pat->len ();
9a08d230 2478
7583d99a 2479 control = pat->insn (0);
eebc8f37
RH
2480 if (can_throw_internal (control))
2481 notice_eh_throw (control);
2482 dwarf2out_flush_queued_reg_saves ();
2483
8f06d483 2484 if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
829bdd4b 2485 {
9a08d230
RH
2486 /* ??? Hopefully multiple delay slots are not annulled. */
2487 gcc_assert (n == 2);
eebc8f37
RH
2488 gcc_assert (!RTX_FRAME_RELATED_P (control));
2489 gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2490
e8a54173 2491 elt = pat->insn (1);
9a08d230 2492
9a08d230
RH
2493 if (INSN_FROM_TARGET_P (elt))
2494 {
2495 HOST_WIDE_INT restore_args_size;
5d1f1cd5 2496 cfi_vec save_row_reg_save;
829bdd4b 2497
bf27c43e
RH
2498 /* If ELT is an instruction from target of an annulled
2499 branch, the effects are for the target only and so
2500 the args_size and CFA along the current path
2501 shouldn't change. */
eebc8f37 2502 add_cfi_insn = NULL;
9a08d230
RH
2503 restore_args_size = cur_trace->end_true_args_size;
2504 cur_cfa = &cur_row->cfa;
9771b263 2505 save_row_reg_save = vec_safe_copy (cur_row->reg_save);
bc5612ed 2506
eebc8f37
RH
2507 scan_insn_after (elt);
2508
2509 /* ??? Should we instead save the entire row state? */
9771b263 2510 gcc_assert (!queued_reg_saves.length ());
eebc8f37
RH
2511
2512 create_trace_edges (control);
bc5612ed 2513
9a08d230
RH
2514 cur_trace->end_true_args_size = restore_args_size;
2515 cur_row->cfa = this_cfa;
5d1f1cd5 2516 cur_row->reg_save = save_row_reg_save;
9a08d230 2517 cur_cfa = &this_cfa;
9a08d230 2518 }
bf27c43e
RH
2519 else
2520 {
 2521 /* If ELT is an annulled branch-taken instruction (i.e.
2522 executed only when branch is not taken), the args_size
2523 and CFA should not change through the jump. */
2524 create_trace_edges (control);
2525
2526 /* Update and continue with the trace. */
2527 add_cfi_insn = insn;
2528 scan_insn_after (elt);
2529 def_cfa_1 (&this_cfa);
2530 }
2531 continue;
9a08d230
RH
2532 }
2533
eebc8f37
RH
2534 /* The insns in the delay slot should all be considered to happen
2535 "before" a call insn. Consider a call with a stack pointer
2536 adjustment in the delay slot. The backtrace from the callee
2537 should include the sp adjustment. Unfortunately, that leaves
2538 us with an unavoidable unwinding error exactly at the call insn
2539 itself. For jump insns we'd prefer to avoid this error by
2540 placing the notes after the sequence. */
2541 if (JUMP_P (control))
2542 add_cfi_insn = insn;
2543
9a08d230
RH
2544 for (i = 1; i < n; ++i)
2545 {
e8a54173 2546 elt = pat->insn (i);
eebc8f37 2547 scan_insn_after (elt);
9a08d230 2548 }
eebc8f37
RH
2549
2550 /* Make sure any register saves are visible at the jump target. */
2551 dwarf2out_flush_queued_reg_saves ();
67d7405e 2552 any_cfis_emitted = false;
eebc8f37
RH
2553
2554 /* However, if there is some adjustment on the call itself, e.g.
2555 a call_pop, that action should be considered to happen after
2556 the call returns. */
2557 add_cfi_insn = insn;
2558 scan_insn_after (control);
829bdd4b 2559 }
9a08d230 2560 else
eebc8f37
RH
2561 {
2562 /* Flush data before calls and jumps, and of course if necessary. */
2563 if (can_throw_internal (insn))
2564 {
2565 notice_eh_throw (insn);
2566 dwarf2out_flush_queued_reg_saves ();
2567 }
2568 else if (!NONJUMP_INSN_P (insn)
2569 || clobbers_queued_reg_save (insn)
2570 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2571 dwarf2out_flush_queued_reg_saves ();
67d7405e 2572 any_cfis_emitted = false;
eebc8f37
RH
2573
2574 add_cfi_insn = insn;
2575 scan_insn_after (insn);
2576 control = insn;
2577 }
9a08d230
RH
2578
2579 /* Between frame-related-p and args_size we might have otherwise
2580 emitted two cfa adjustments. Do it now. */
2581 def_cfa_1 (&this_cfa);
45fba6d1 2582
67d7405e
RH
2583 /* Minimize the number of advances by emitting the entire queue
2584 once anything is emitted. */
2585 if (any_cfis_emitted
2586 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2587 dwarf2out_flush_queued_reg_saves ();
2588
829bdd4b
RH
2589 /* Note that a test for control_flow_insn_p does exactly the
2590 same tests as are done to actually create the edges. So
2591 always call the routine and let it not create edges for
2592 non-control-flow insns. */
eebc8f37 2593 create_trace_edges (control);
647a1567 2594 }
45fba6d1 2595
141618e2 2596 add_cfi_insn = NULL;
829bdd4b
RH
2597 cur_row = NULL;
2598 cur_trace = NULL;
9a08d230 2599 cur_cfa = NULL;
647a1567
RH
2600}
2601
829bdd4b 2602/* Scan the function and create the initial set of CFI notes. */
647a1567 2603
bc5612ed 2604static void
829bdd4b 2605create_cfi_notes (void)
647a1567 2606{
829bdd4b 2607 dw_trace_info *ti;
647a1567 2608
9771b263
DN
2609 gcc_checking_assert (!queued_reg_saves.exists ());
2610 gcc_checking_assert (!trace_work_list.exists ());
647a1567 2611
829bdd4b 2612 /* Always begin at the entry trace. */
9771b263 2613 ti = &trace_info[0];
829bdd4b 2614 scan_trace (ti);
647a1567 2615
9771b263 2616 while (!trace_work_list.is_empty ())
829bdd4b 2617 {
9771b263 2618 ti = trace_work_list.pop ();
829bdd4b 2619 scan_trace (ti);
647a1567
RH
2620 }
2621
9771b263
DN
2622 queued_reg_saves.release ();
2623 trace_work_list.release ();
829bdd4b 2624}
647a1567 2625
200e10dc
RH
2626/* Return the insn before the first NOTE_INSN_CFI after START. */
2627
dc01c3d1
DM
2628static rtx_insn *
2629before_next_cfi_note (rtx_insn *start)
200e10dc 2630{
dc01c3d1 2631 rtx_insn *prev = start;
200e10dc
RH
2632 while (start)
2633 {
2634 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2635 return prev;
2636 prev = start;
2637 start = NEXT_INSN (start);
2638 }
2639 gcc_unreachable ();
2640}
2641
829bdd4b 2642/* Insert CFI notes between traces to properly change state between them. */
647a1567 2643
829bdd4b
RH
2644static void
2645connect_traces (void)
2646{
9771b263 2647 unsigned i, n = trace_info.length ();
829bdd4b
RH
2648 dw_trace_info *prev_ti, *ti;
2649
200e10dc
RH
2650 /* ??? Ideally, we should have both queued and processed every trace.
2651 However the current representation of constant pools on various targets
2652 is indistinguishable from unreachable code. Assume for the moment that
2653 we can simply skip over such traces. */
2654 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2655 these are not "real" instructions, and should not be considered.
2656 This could be generically useful for tablejump data as well. */
2657 /* Remove all unprocessed traces from the list. */
2658 for (i = n - 1; i > 0; --i)
2659 {
9771b263 2660 ti = &trace_info[i];
200e10dc
RH
2661 if (ti->beg_row == NULL)
2662 {
9771b263 2663 trace_info.ordered_remove (i);
200e10dc
RH
2664 n -= 1;
2665 }
2666 else
2667 gcc_assert (ti->end_row != NULL);
2668 }
829bdd4b 2669
200e10dc
RH
2670 /* Work from the end back to the beginning. This lets us easily insert
2671 remember/restore_state notes in the correct order wrt other notes. */
9771b263 2672 prev_ti = &trace_info[n - 1];
200e10dc 2673 for (i = n - 1; i > 0; --i)
647a1567 2674 {
829bdd4b 2675 dw_cfi_row *old_row;
647a1567 2676
200e10dc 2677 ti = prev_ti;
9771b263 2678 prev_ti = &trace_info[i - 1];
647a1567 2679
200e10dc 2680 add_cfi_insn = ti->head;
829bdd4b
RH
2681
2682 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2683 for the portion of the function in the alternate text
2684 section. The row state at the very beginning of that
2685 new FDE will be exactly the row state from the CIE. */
2686 if (ti->switch_sections)
2687 old_row = cie_cfi_row;
2688 else
200e10dc
RH
2689 {
2690 old_row = prev_ti->end_row;
2691 /* If there's no change from the previous end state, fine. */
2692 if (cfi_row_equal_p (old_row, ti->beg_row))
2693 ;
2694 /* Otherwise check for the common case of sharing state with
2695 the beginning of an epilogue, but not the end. Insert
2696 remember/restore opcodes in that case. */
2697 else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2698 {
2699 dw_cfi_ref cfi;
2700
2701 /* Note that if we blindly insert the remember at the
2702 start of the trace, we can wind up increasing the
2703 size of the unwind info due to extra advance opcodes.
2704 Instead, put the remember immediately before the next
2705 state change. We know there must be one, because the
2706 state at the beginning and head of the trace differ. */
2707 add_cfi_insn = before_next_cfi_note (prev_ti->head);
2708 cfi = new_cfi ();
2709 cfi->dw_cfi_opc = DW_CFA_remember_state;
2710 add_cfi (cfi);
2711
2712 add_cfi_insn = ti->head;
2713 cfi = new_cfi ();
2714 cfi->dw_cfi_opc = DW_CFA_restore_state;
2715 add_cfi (cfi);
2716
2717 old_row = prev_ti->beg_row;
2718 }
2719 /* Otherwise, we'll simply change state from the previous end. */
2720 }
829bdd4b 2721
829bdd4b
RH
2722 change_cfi_row (old_row, ti->beg_row);
2723
2724 if (dump_file && add_cfi_insn != ti->head)
2725 {
dc01c3d1 2726 rtx_insn *note;
829bdd4b 2727
200e10dc
RH
2728 fprintf (dump_file, "Fixup between trace %u and %u:\n",
2729 prev_ti->id, ti->id);
829bdd4b
RH
2730
2731 note = ti->head;
2732 do
2733 {
2734 note = NEXT_INSN (note);
2735 gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2736 output_cfi_directive (dump_file, NOTE_CFI (note));
2737 }
2738 while (note != add_cfi_insn);
2739 }
2740 }
9a08d230
RH
2741
2742 /* Connect args_size between traces that have can_throw_internal insns. */
9771b263 2743 if (cfun->eh->lp_array)
9a08d230
RH
2744 {
2745 HOST_WIDE_INT prev_args_size = 0;
2746
2747 for (i = 0; i < n; ++i)
2748 {
9771b263 2749 ti = &trace_info[i];
9a08d230
RH
2750
2751 if (ti->switch_sections)
2752 prev_args_size = 0;
2753 if (ti->eh_head == NULL)
2754 continue;
2755 gcc_assert (!ti->args_size_undefined);
2756
2757 if (ti->beg_delay_args_size != prev_args_size)
2758 {
2759 /* ??? Search back to previous CFI note. */
2760 add_cfi_insn = PREV_INSN (ti->eh_head);
2761 add_cfi_args_size (ti->beg_delay_args_size);
2762 }
2763
2764 prev_args_size = ti->end_delay_args_size;
2765 }
2766 }
647a1567
RH
2767}
2768
829bdd4b
RH
2769/* Set up the pseudo-cfg of instruction traces, as described at the
2770 block comment at the top of the file. */
647a1567 2771
bc5612ed 2772static void
829bdd4b 2773create_pseudo_cfg (void)
647a1567 2774{
829bdd4b 2775 bool saw_barrier, switch_sections;
f32682ca 2776 dw_trace_info ti;
f65c531e 2777 rtx_insn *insn;
829bdd4b
RH
2778 unsigned i;
2779
2780 /* The first trace begins at the start of the function,
2781 and begins with the CIE row state. */
9771b263 2782 trace_info.create (16);
f32682ca
DN
2783 memset (&ti, 0, sizeof (ti));
2784 ti.head = get_insns ();
2785 ti.beg_row = cie_cfi_row;
2786 ti.cfa_store = cie_cfi_row->cfa;
2787 ti.cfa_temp.reg = INVALID_REGNUM;
9771b263 2788 trace_info.quick_push (ti);
829bdd4b 2789
829bdd4b 2790 if (cie_return_save)
9771b263 2791 ti.regs_saved_in_regs.safe_push (*cie_return_save);
647a1567 2792
829bdd4b
RH
2793 /* Walk all the insns, collecting start of trace locations. */
2794 saw_barrier = false;
2795 switch_sections = false;
2796 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2797 {
2798 if (BARRIER_P (insn))
2799 saw_barrier = true;
2800 else if (NOTE_P (insn)
2801 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2802 {
2803 /* We should have just seen a barrier. */
2804 gcc_assert (saw_barrier);
2805 switch_sections = true;
2806 }
2807 /* Watch out for save_point notes between basic blocks.
2808 In particular, a note after a barrier. Do not record these,
2809 delaying trace creation until the label. */
2810 else if (save_point_p (insn)
2811 && (LABEL_P (insn) || !saw_barrier))
2812 {
f32682ca
DN
2813 memset (&ti, 0, sizeof (ti));
2814 ti.head = insn;
2815 ti.switch_sections = switch_sections;
f8ed2fc2 2816 ti.id = trace_info.length ();
9771b263 2817 trace_info.safe_push (ti);
829bdd4b
RH
2818
2819 saw_barrier = false;
2820 switch_sections = false;
2821 }
2822 }
2823
2824 /* Create the trace index after we've finished building trace_info,
2825 avoiding stale pointer problems due to reallocation. */
c203e8a7
TS
2826 trace_index
2827 = new hash_table<trace_info_hasher> (trace_info.length ());
f32682ca 2828 dw_trace_info *tp;
9771b263 2829 FOR_EACH_VEC_ELT (trace_info, i, tp)
829bdd4b 2830 {
4a8fb1a1 2831 dw_trace_info **slot;
647a1567 2832
829bdd4b 2833 if (dump_file)
f8ed2fc2 2834 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
f32682ca
DN
2835 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2836 tp->switch_sections ? " (section switch)" : "");
829bdd4b 2837
c203e8a7 2838 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
829bdd4b 2839 gcc_assert (*slot == NULL);
4a8fb1a1 2840 *slot = tp;
829bdd4b 2841 }
647a1567 2842}
829bdd4b 2843
a8e5c0e7
RH
2844/* Record the initial position of the return address. RTL is
2845 INCOMING_RETURN_ADDR_RTX. */
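/* Hypothetical shapes this handles: (reg lr) when the return address
   arrives in a register, (mem (reg sp)) when the call pushes it, and
   (plus (reg i7) (const_int 8)) as on SPARC, where the offset is ignored
   and only the containing register matters.  */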
2846
2847static void
2848initial_return_save (rtx rtl)
2849{
2850 unsigned int reg = INVALID_REGNUM;
2851 HOST_WIDE_INT offset = 0;
2852
2853 switch (GET_CODE (rtl))
2854 {
2855 case REG:
2856 /* RA is in a register. */
7263c6d7 2857 reg = dwf_regno (rtl);
a8e5c0e7
RH
2858 break;
2859
2860 case MEM:
2861 /* RA is on the stack. */
2862 rtl = XEXP (rtl, 0);
2863 switch (GET_CODE (rtl))
2864 {
2865 case REG:
2866 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2867 offset = 0;
2868 break;
2869
2870 case PLUS:
2871 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2872 offset = INTVAL (XEXP (rtl, 1));
2873 break;
2874
2875 case MINUS:
2876 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2877 offset = -INTVAL (XEXP (rtl, 1));
2878 break;
2879
2880 default:
2881 gcc_unreachable ();
2882 }
2883
2884 break;
2885
2886 case PLUS:
2887 /* The return address is at some offset from any value we can
2888 actually load. For instance, on the SPARC it is in %i7+8. Just
2889 ignore the offset for now; it doesn't matter for unwinding frames. */
2890 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2891 initial_return_save (XEXP (rtl, 0));
2892 return;
2893
2894 default:
2895 gcc_unreachable ();
2896 }
2897
2898 if (reg != DWARF_FRAME_RETURN_COLUMN)
2899 {
2900 if (reg != INVALID_REGNUM)
2901 record_reg_saved_in_reg (rtl, pc_rtx);
f17d3401 2902 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
a8e5c0e7
RH
2903 }
2904}
647a1567 2905
43215a89
RH
2906static void
2907create_cie_data (void)
2908{
2909 dw_cfa_location loc;
2910 dw_trace_info cie_trace;
2911
2912 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
43215a89 2913
c3284718 2914 memset (&cie_trace, 0, sizeof (cie_trace));
43215a89
RH
2915 cur_trace = &cie_trace;
2916
2917 add_cfi_vec = &cie_cfi_vec;
2918 cie_cfi_row = cur_row = new_cfi_row ();
2919
2920 /* On entry, the Canonical Frame Address is at SP. */
c3284718 2921 memset (&loc, 0, sizeof (loc));
43215a89
RH
2922 loc.reg = dw_stack_pointer_regnum;
2923 loc.offset = INCOMING_FRAME_SP_OFFSET;
2924 def_cfa_1 (&loc);
2925
2926 if (targetm.debug_unwind_info () == UI_DWARF2
2927 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2928 {
2929 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2930
2931 /* For a few targets, we have the return address incoming into a
2932 register, but choose a different return column. This will result
2933 in a DW_CFA_register for the return, and an entry in
2934 regs_saved_in_regs to match. If the target later stores that
2935 return address register to the stack, we want to be able to emit
2936 the DW_CFA_offset against the return column, not the intermediate
2937 save register. Save the contents of regs_saved_in_regs so that
2938 we can re-initialize it at the start of each function. */
9771b263 2939 switch (cie_trace.regs_saved_in_regs.length ())
43215a89
RH
2940 {
2941 case 0:
2942 break;
2943 case 1:
766090c2 2944 cie_return_save = ggc_alloc<reg_saved_in_data> ();
9771b263
DN
2945 *cie_return_save = cie_trace.regs_saved_in_regs[0];
2946 cie_trace.regs_saved_in_regs.release ();
43215a89
RH
2947 break;
2948 default:
2949 gcc_unreachable ();
2950 }
2951 }
2952
2953 add_cfi_vec = NULL;
2954 cur_row = NULL;
2955 cur_trace = NULL;
2956}
2957
7644b3c7
RH
2958/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2959 state at each location within the function. These notes will be
2960 emitted during pass_final. */
647a1567 2961
7644b3c7
RH
2962static unsigned int
2963execute_dwarf2_frame (void)
647a1567 2964{
703fa2e6
CB
2965 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
2966 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2967
7644b3c7
RH
2968 /* The first time we're called, compute the incoming frame state. */
2969 if (cie_cfi_vec == NULL)
43215a89 2970 create_cie_data ();
3edb53aa 2971
7644b3c7
RH
2972 dwarf2out_alloc_current_fde ();
2973
829bdd4b
RH
2974 create_pseudo_cfg ();
2975
7644b3c7
RH
2976 /* Do the work. */
2977 create_cfi_notes ();
829bdd4b 2978 connect_traces ();
7644b3c7
RH
2979 add_cfis_to_fde ();
2980
829bdd4b
RH
2981 /* Free all the data we allocated. */
2982 {
2983 size_t i;
2984 dw_trace_info *ti;
647a1567 2985
9771b263
DN
2986 FOR_EACH_VEC_ELT (trace_info, i, ti)
2987 ti->regs_saved_in_regs.release ();
829bdd4b 2988 }
9771b263 2989 trace_info.release ();
829bdd4b 2990
c203e8a7
TS
2991 delete trace_index;
2992 trace_index = NULL;
f17d3401 2993
7644b3c7 2994 return 0;
647a1567
RH
2995}
2996\f
948d330e
RH
2997/* Convert a DWARF call frame info operation to its string name.  */
2998
2999static const char *
3000dwarf_cfi_name (unsigned int cfi_opc)
3001{
11ec770e 3002 const char *name = get_DW_CFA_name (cfi_opc);
948d330e 3003
11ec770e
TT
3004 if (name != NULL)
3005 return name;
948d330e 3006
11ec770e 3007 return "DW_CFA_<unknown>";
948d330e
RH
3008}
3009
3010/* This routine will generate the correct assembly data for a location
3011 description based on a cfi entry with a complex address. */
3012
3013static void
3014output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3015{
3016 dw_loc_descr_ref loc;
3017 unsigned long size;
3018
3019 if (cfi->dw_cfi_opc == DW_CFA_expression)
3020 {
43215a89 3021 unsigned r =
948d330e
RH
3022 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3023 dw2_asm_output_data (1, r, NULL);
3024 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3025 }
3026 else
3027 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3028
3029 /* Output the size of the block. */
3030 size = size_of_locs (loc);
3031 dw2_asm_output_data_uleb128 (size, NULL);
3032
3033 /* Now output the operations themselves. */
3034 output_loc_sequence (loc, for_eh);
3035}
3036
3037/* Similar, but used for .cfi_escape. */
3038
3039static void
3040output_cfa_loc_raw (dw_cfi_ref cfi)
3041{
3042 dw_loc_descr_ref loc;
3043 unsigned long size;
3044
3045 if (cfi->dw_cfi_opc == DW_CFA_expression)
3046 {
43215a89 3047 unsigned r =
948d330e
RH
3048 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3049 fprintf (asm_out_file, "%#x,", r);
3050 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3051 }
3052 else
3053 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3054
3055 /* Output the size of the block. */
3056 size = size_of_locs (loc);
3057 dw2_asm_output_data_uleb128_raw (size);
3058 fputc (',', asm_out_file);
3059
3060 /* Now output the operations themselves. */
3061 output_loc_sequence_raw (loc);
3062}
3063
3064/* Output a Call Frame Information opcode and its operand(s). */
3065
3066void
3067output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3068{
3069 unsigned long r;
3070 HOST_WIDE_INT off;
3071
3072 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3073 dw2_asm_output_data (1, (cfi->dw_cfi_opc
3074 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3075 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3076 ((unsigned HOST_WIDE_INT)
3077 cfi->dw_cfi_oprnd1.dw_cfi_offset));
3078 else if (cfi->dw_cfi_opc == DW_CFA_offset)
3079 {
3080 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3081 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3082 "DW_CFA_offset, column %#lx", r);
3083 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3084 dw2_asm_output_data_uleb128 (off, NULL);
3085 }
3086 else if (cfi->dw_cfi_opc == DW_CFA_restore)
3087 {
3088 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3089 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3090 "DW_CFA_restore, column %#lx", r);
3091 }
3092 else
3093 {
3094 dw2_asm_output_data (1, cfi->dw_cfi_opc,
3095 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3096
3097 switch (cfi->dw_cfi_opc)
3098 {
3099 case DW_CFA_set_loc:
3100 if (for_eh)
3101 dw2_asm_output_encoded_addr_rtx (
3102 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3103 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3104 false, NULL);
3105 else
3106 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3107 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3108 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3109 break;
3110
3111 case DW_CFA_advance_loc1:
3112 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3113 fde->dw_fde_current_label, NULL);
3114 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3115 break;
3116
3117 case DW_CFA_advance_loc2:
3118 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3119 fde->dw_fde_current_label, NULL);
3120 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3121 break;
3122
3123 case DW_CFA_advance_loc4:
3124 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3125 fde->dw_fde_current_label, NULL);
3126 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3127 break;
3128
3129 case DW_CFA_MIPS_advance_loc8:
3130 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3131 fde->dw_fde_current_label, NULL);
3132 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3133 break;
3134
3135 case DW_CFA_offset_extended:
3136 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3137 dw2_asm_output_data_uleb128 (r, NULL);
3138 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3139 dw2_asm_output_data_uleb128 (off, NULL);
3140 break;
3141
3142 case DW_CFA_def_cfa:
3143 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3144 dw2_asm_output_data_uleb128 (r, NULL);
3145 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3146 break;
3147
3148 case DW_CFA_offset_extended_sf:
3149 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3150 dw2_asm_output_data_uleb128 (r, NULL);
3151 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3152 dw2_asm_output_data_sleb128 (off, NULL);
3153 break;
3154
3155 case DW_CFA_def_cfa_sf:
3156 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3157 dw2_asm_output_data_uleb128 (r, NULL);
3158 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3159 dw2_asm_output_data_sleb128 (off, NULL);
3160 break;
3161
3162 case DW_CFA_restore_extended:
3163 case DW_CFA_undefined:
3164 case DW_CFA_same_value:
3165 case DW_CFA_def_cfa_register:
3166 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3167 dw2_asm_output_data_uleb128 (r, NULL);
3168 break;
3169
3170 case DW_CFA_register:
3171 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3172 dw2_asm_output_data_uleb128 (r, NULL);
3173 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3174 dw2_asm_output_data_uleb128 (r, NULL);
3175 break;
3176
3177 case DW_CFA_def_cfa_offset:
3178 case DW_CFA_GNU_args_size:
3179 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3180 break;
3181
3182 case DW_CFA_def_cfa_offset_sf:
3183 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3184 dw2_asm_output_data_sleb128 (off, NULL);
3185 break;
3186
3187 case DW_CFA_GNU_window_save:
3188 break;
3189
3190 case DW_CFA_def_cfa_expression:
3191 case DW_CFA_expression:
3192 output_cfa_loc (cfi, for_eh);
3193 break;
3194
3195 case DW_CFA_GNU_negative_offset_extended:
3196 /* Obsoleted by DW_CFA_offset_extended_sf. */
3197 gcc_unreachable ();
3198
3199 default:
3200 break;
3201 }
3202 }
3203}
3204
3205/* Similar, but do it via assembler directives instead. */
3206
3207void
3208output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3209{
3210 unsigned long r, r2;
3211
3212 switch (cfi->dw_cfi_opc)
3213 {
3214 case DW_CFA_advance_loc:
3215 case DW_CFA_advance_loc1:
3216 case DW_CFA_advance_loc2:
3217 case DW_CFA_advance_loc4:
3218 case DW_CFA_MIPS_advance_loc8:
3219 case DW_CFA_set_loc:
3220 /* Should only be created in a code path not followed when emitting
3221 via directives. The assembler is going to take care of this for
3222 us. But this routine is also used for debugging dumps, so
3223 print something. */
3224 gcc_assert (f != asm_out_file);
3225 fprintf (f, "\t.cfi_advance_loc\n");
3226 break;
3227
3228 case DW_CFA_offset:
3229 case DW_CFA_offset_extended:
3230 case DW_CFA_offset_extended_sf:
3231 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
16998094 3232 fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
948d330e
RH
3233 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3234 break;
3235
3236 case DW_CFA_restore:
3237 case DW_CFA_restore_extended:
3238 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3239 fprintf (f, "\t.cfi_restore %lu\n", r);
3240 break;
3241
3242 case DW_CFA_undefined:
3243 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3244 fprintf (f, "\t.cfi_undefined %lu\n", r);
3245 break;
3246
3247 case DW_CFA_same_value:
3248 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3249 fprintf (f, "\t.cfi_same_value %lu\n", r);
3250 break;
3251
3252 case DW_CFA_def_cfa:
3253 case DW_CFA_def_cfa_sf:
3254 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
16998094 3255 fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
948d330e
RH
3256 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3257 break;
3258
3259 case DW_CFA_def_cfa_register:
3260 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3261 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3262 break;
3263
3264 case DW_CFA_register:
3265 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3266 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3267 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3268 break;
3269
3270 case DW_CFA_def_cfa_offset:
3271 case DW_CFA_def_cfa_offset_sf:
3272 fprintf (f, "\t.cfi_def_cfa_offset "
3273 HOST_WIDE_INT_PRINT_DEC"\n",
3274 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3275 break;
3276
3277 case DW_CFA_remember_state:
3278 fprintf (f, "\t.cfi_remember_state\n");
3279 break;
3280 case DW_CFA_restore_state:
3281 fprintf (f, "\t.cfi_restore_state\n");
3282 break;
3283
3284 case DW_CFA_GNU_args_size:
3285 if (f == asm_out_file)
3286 {
3287 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3288 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3289 if (flag_debug_asm)
16998094 3290 fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
948d330e
RH
3291 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3292 fputc ('\n', f);
3293 }
3294 else
3295 {
16998094 3296 fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
948d330e
RH
3297 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3298 }
3299 break;
3300
3301 case DW_CFA_GNU_window_save:
3302 fprintf (f, "\t.cfi_window_save\n");
3303 break;
3304
3305 case DW_CFA_def_cfa_expression:
3306 if (f != asm_out_file)
3307 {
3308 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3309 break;
3310 }
3311 /* FALLTHRU */
3312 case DW_CFA_expression:
3313 if (f != asm_out_file)
3314 {
3315 fprintf (f, "\t.cfi_cfa_expression ...\n");
3316 break;
3317 }
3318 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3319 output_cfa_loc_raw (cfi);
3320 fputc ('\n', f);
3321 break;
3322
3323 default:
3324 gcc_unreachable ();
3325 }
3326}
3327
3328void
3329dwarf2out_emit_cfi (dw_cfi_ref cfi)
3330{
3331 if (dwarf2out_do_cfi_asm ())
3332 output_cfi_directive (asm_out_file, cfi);
3333}
a5d0ce89
RH
3334
3335static void
3336dump_cfi_row (FILE *f, dw_cfi_row *row)
3337{
3338 dw_cfi_ref cfi;
3339 unsigned i;
3340
3341 cfi = row->cfa_cfi;
3342 if (!cfi)
3343 {
3344 dw_cfa_location dummy;
c3284718 3345 memset (&dummy, 0, sizeof (dummy));
a5d0ce89
RH
3346 dummy.reg = INVALID_REGNUM;
3347 cfi = def_cfa_0 (&dummy, &row->cfa);
3348 }
3349 output_cfi_directive (f, cfi);
3350
9771b263 3351 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
a5d0ce89
RH
3352 if (cfi)
3353 output_cfi_directive (f, cfi);
a5d0ce89
RH
3354}
3355
3356void debug_cfi_row (dw_cfi_row *row);
3357
3358void
3359debug_cfi_row (dw_cfi_row *row)
3360{
3361 dump_cfi_row (stderr, row);
3362}
948d330e 3363\f
647a1567 3364
7644b3c7
RH
3365/* Save the result of dwarf2out_do_frame across PCH.
3366 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3367static GTY(()) signed char saved_do_cfi_asm = 0;
647a1567
RH
3368
3369/* Decide whether we want to emit frame unwind information for the current
3370 translation unit. */
3371
7644b3c7 3372bool
647a1567
RH
3373dwarf2out_do_frame (void)
3374{
3375 /* We want to emit correct CFA location expressions or lists, so we
3376 have to return true if we're going to output debug info, even if
3377 we're not going to output frame or unwind info. */
3378 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3379 return true;
3380
7644b3c7 3381 if (saved_do_cfi_asm > 0)
647a1567
RH
3382 return true;
3383
3384 if (targetm.debug_unwind_info () == UI_DWARF2)
3385 return true;
3386
3387 if ((flag_unwind_tables || flag_exceptions)
3388 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3389 return true;
3390
3391 return false;
3392}
3393
3394/* Decide whether to emit frame unwind via assembler directives. */
3395
7644b3c7 3396bool
647a1567
RH
3397dwarf2out_do_cfi_asm (void)
3398{
3399 int enc;
3400
7644b3c7
RH
3401 if (saved_do_cfi_asm != 0)
3402 return saved_do_cfi_asm > 0;
3403
3404 /* Assume failure for a moment. */
3405 saved_do_cfi_asm = -1;
3406
647a1567
RH
3407 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3408 return false;
3409 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3410 return false;
3411
3412 /* Make sure the personality encoding is one the assembler can support.
3413 In particular, aligned addresses can't be handled. */
3414 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3415 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3416 return false;
3417 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3418 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3419 return false;
3420
3421 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3422 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3423 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3424 && !flag_unwind_tables && !flag_exceptions
3425 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3426 return false;
3427
7644b3c7
RH
3428 /* Success! */
3429 saved_do_cfi_asm = 1;
647a1567
RH
3430 return true;
3431}
3432
27a4cd48
DM
3433namespace {
3434
3435const pass_data pass_data_dwarf2_frame =
3436{
3437 RTL_PASS, /* type */
3438 "dwarf2", /* name */
3439 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
3440 TV_FINAL, /* tv_id */
3441 0, /* properties_required */
3442 0, /* properties_provided */
3443 0, /* properties_destroyed */
3444 0, /* todo_flags_start */
3445 0, /* todo_flags_finish */
7644b3c7
RH
3446};
3447
27a4cd48
DM
3448class pass_dwarf2_frame : public rtl_opt_pass
3449{
3450public:
c3284718
RS
3451 pass_dwarf2_frame (gcc::context *ctxt)
3452 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
27a4cd48
DM
3453 {}
3454
3455 /* opt_pass methods: */
1a3d085c 3456 virtual bool gate (function *);
be55bfe6 3457 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
27a4cd48
DM
3458
3459}; // class pass_dwarf2_frame
3460
1a3d085c
TS
3461bool
3462pass_dwarf2_frame::gate (function *)
3463{
1a3d085c
TS
3464 /* Targets which still implement the prologue in assembler text
3465 cannot use the generic dwarf2 unwinding. */
e86a9946
RS
3466 if (!targetm.have_prologue ())
3467 return false;
1a3d085c
TS
3468
3469 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3470 from the optimized shrink-wrapping annotations that we will compute.
3471 For now, only produce the CFI notes for dwarf2. */
3472 return dwarf2out_do_frame ();
3473}
3474
27a4cd48
DM
3475} // anon namespace
3476
3477rtl_opt_pass *
3478make_pass_dwarf2_frame (gcc::context *ctxt)
3479{
3480 return new pass_dwarf2_frame (ctxt);
3481}
3482
647a1567 3483#include "gt-dwarf2cfi.h"