/* Scraped gitweb page header removed: this is gcc/dwarf2cfi.c from the
   thirdparty/gcc.git mirror (commit "[34/77] Add a SCALAR_INT_TYPE_MODE
   macro").  */
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
36
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
42
43
44 /* ??? Poison these here until it can be done generically. They've been
45 totally replaced in this file; make sure it stays that way. */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
50 #endif
51
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
54 #endif
55 \f
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF column number; a null element means the column is not saved
     (see update_row_reg_save).  */
  cfi_vec reg_save;
};
68
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
struct GTY(()) reg_saved_in_data {
  /* The register as the caller knows it.  */
  rtx orig_reg;
  /* The register that currently holds ORIG_REG's entry value.  */
  rtx saved_in_reg;
};
74
75
/* Since we no longer have a proper CFG, we're going to create a facsimile
   of one on the fly while processing the frame-related insns.

   We create dw_trace_info structures for each extended basic block beginning
   and ending at a "save point".  Save points are labels, barriers, certain
   notes, and of course the beginning and end of the function.

   As we encounter control transfer insns, we propagate the "current"
   row state across the edges to the starts of traces.  When checking is
   enabled, we validate that we propagate the same data from all sources.

   All traces are members of the TRACE_INFO array, in the order in which
   they appear in the instruction stream.

   All save points are present in the TRACE_INDEX hash, mapping the insn
   starting a trace to the dw_trace_info describing the trace.  */

struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};
147
148
/* Hashtable helpers.  */

struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

/* Hash a trace by the UID of the insn at which it begins.  */

inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

/* Two traces are the same iff they begin at the same insn; only the
   head field participates in lookup.  */

inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
168
169
/* The variables making up the pseudo-cfg, as described above.  */

/* All traces, in instruction-stream order.  */
static vec<dw_trace_info> trace_info;
/* Traces queued for processing.  */
static vec<dw_trace_info *> trace_work_list;
/* Maps the insn starting a trace to its dw_trace_info (see
   trace_info_hasher / get_trace_info).  */
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* NOTE(review): presumably records how the CIE saves the return-address
   column when it lives in a register — confirm at its initialization
   site (not visible in this chunk).  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Source of unique numbers for the "LCFI" internal labels generated by
   dwarf2out_cfi_label.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
};

/* Register saves queued but not yet emitted; flushed by
   dwarf2out_flush_queued_reg_saves.  */
static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
221 \f
/* Hook used by __throw.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  /* Map the stack pointer's hard regno to its DWARF column and apply the
     target's output mapping.  NOTE(review): the second argument of
     DWARF2_FRAME_REG_OUT is presumably a for_eh flag — confirm against
     the target macro's definition.  */
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}
230
231 /* MEM is a memory reference for the register size table, each element of
232 which has mode MODE. Initialize column C as a return address column. */
233
234 static void
235 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
236 {
237 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
238 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
239 emit_move_insn (adjust_address (mem, mode, offset),
240 gen_int_mode (size, mode));
241 }
242
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  Lets the caller skip registers
     already handled as pieces of a register span.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};
257
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  /* Translate the hard regno through the target's DWARF numbering and
     the unwinder column remapping to locate the table slot.  */
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO as seen even when we end up writing nothing, so the
     caller never re-processes it as part of a span.  */
  init_state->processed_regno[regno] = true;

  /* Registers beyond the unwinder's table get no slot.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      /* A VOIDmode register has no size worth recording.  */
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* NOTE(review): a negative slot offset presumably marks a column the
     target maps away — confirm against DWARF_REG_TO_UNWIND_COLUMN.  */
  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
294
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  /* Each table slot is a char-sized integer.  */
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      /* A null span means the register maps to a single DWARF column;
	 otherwise handle each piece of the span individually.  */
      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* Make sure the return column's size is recorded even when no hard
     register mapped to it above.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Let the target add any additional entries of its own.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
348
349 \f
350 static dw_trace_info *
351 get_trace_info (rtx_insn *insn)
352 {
353 dw_trace_info dummy;
354 dummy.head = insn;
355 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
356 }
357
358 static bool
359 save_point_p (rtx_insn *insn)
360 {
361 /* Labels, except those that are really jump tables. */
362 if (LABEL_P (insn))
363 return inside_basic_block_p (insn);
364
365 /* We split traces at the prologue/epilogue notes because those
366 are points at which the unwind info is usually stable. This
367 makes it easier to find spots with identical unwind info so
368 that we can use remember/restore_state opcodes. */
369 if (NOTE_P (insn))
370 switch (NOTE_KIND (insn))
371 {
372 case NOTE_INSN_PROLOGUE_END:
373 case NOTE_INSN_EPILOGUE_BEG:
374 return true;
375 }
376
377 return false;
378 }
379
380 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
381
382 static inline HOST_WIDE_INT
383 div_data_align (HOST_WIDE_INT off)
384 {
385 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
386 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
387 return r;
388 }
389
390 /* Return true if we need a signed version of a given opcode
391 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
392
393 static inline bool
394 need_data_align_sf_opcode (HOST_WIDE_INT off)
395 {
396 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
397 }
398
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  /* Zero both operands; callers fill in only the fields their opcode
     actually uses.  */
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
411
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  /* The cleared allocation zeroes everything else; only the CFA register
     needs an explicit "undefined" marker.  */
  row->cfa.reg = INVALID_REGNUM;

  return row;
}
423
/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  /* The structure copy would share SRC's reg_save vector, so give DST
     its own copy to keep later updates from aliasing.  */
  *dst = *src;
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}
436
/* Generate a new label for the CFI info to refer to.  The caller owns
   (and must eventually free) the returned string.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  char label[20];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}
449
/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      /* Attach the CFI as a note after the current insertion point, and
	 advance the point so subsequent CFIs keep their order.  */
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
466
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE as the current
   outgoing-argument area size.  */

static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}
481
482 static void
483 add_cfi_restore (unsigned reg)
484 {
485 dw_cfi_ref cfi = new_cfi ();
486
487 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
488 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
489
490 add_cfi (cfi);
491 }
492
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector lazily; new slots are cleared, i.e. "not saved".  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
503
504 /* This function fills in aa dw_cfa_location structure from a dwarf location
505 descriptor sequence. */
506
507 static void
508 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
509 {
510 struct dw_loc_descr_node *ptr;
511 cfa->offset = 0;
512 cfa->base_offset = 0;
513 cfa->indirect = 0;
514 cfa->reg = -1;
515
516 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
517 {
518 enum dwarf_location_atom op = ptr->dw_loc_opc;
519
520 switch (op)
521 {
522 case DW_OP_reg0:
523 case DW_OP_reg1:
524 case DW_OP_reg2:
525 case DW_OP_reg3:
526 case DW_OP_reg4:
527 case DW_OP_reg5:
528 case DW_OP_reg6:
529 case DW_OP_reg7:
530 case DW_OP_reg8:
531 case DW_OP_reg9:
532 case DW_OP_reg10:
533 case DW_OP_reg11:
534 case DW_OP_reg12:
535 case DW_OP_reg13:
536 case DW_OP_reg14:
537 case DW_OP_reg15:
538 case DW_OP_reg16:
539 case DW_OP_reg17:
540 case DW_OP_reg18:
541 case DW_OP_reg19:
542 case DW_OP_reg20:
543 case DW_OP_reg21:
544 case DW_OP_reg22:
545 case DW_OP_reg23:
546 case DW_OP_reg24:
547 case DW_OP_reg25:
548 case DW_OP_reg26:
549 case DW_OP_reg27:
550 case DW_OP_reg28:
551 case DW_OP_reg29:
552 case DW_OP_reg30:
553 case DW_OP_reg31:
554 cfa->reg = op - DW_OP_reg0;
555 break;
556 case DW_OP_regx:
557 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
558 break;
559 case DW_OP_breg0:
560 case DW_OP_breg1:
561 case DW_OP_breg2:
562 case DW_OP_breg3:
563 case DW_OP_breg4:
564 case DW_OP_breg5:
565 case DW_OP_breg6:
566 case DW_OP_breg7:
567 case DW_OP_breg8:
568 case DW_OP_breg9:
569 case DW_OP_breg10:
570 case DW_OP_breg11:
571 case DW_OP_breg12:
572 case DW_OP_breg13:
573 case DW_OP_breg14:
574 case DW_OP_breg15:
575 case DW_OP_breg16:
576 case DW_OP_breg17:
577 case DW_OP_breg18:
578 case DW_OP_breg19:
579 case DW_OP_breg20:
580 case DW_OP_breg21:
581 case DW_OP_breg22:
582 case DW_OP_breg23:
583 case DW_OP_breg24:
584 case DW_OP_breg25:
585 case DW_OP_breg26:
586 case DW_OP_breg27:
587 case DW_OP_breg28:
588 case DW_OP_breg29:
589 case DW_OP_breg30:
590 case DW_OP_breg31:
591 cfa->reg = op - DW_OP_breg0;
592 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
593 break;
594 case DW_OP_bregx:
595 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
596 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
597 break;
598 case DW_OP_deref:
599 cfa->indirect = 1;
600 break;
601 case DW_OP_plus_uconst:
602 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
603 break;
604 default:
605 gcc_unreachable ();
606 }
607 }
608 }
609
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Only the offset changes; the register carries over.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Only the register changes; the offset carries over.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the location expression back into reg/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only a single level of remember/restore is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* No other opcode affects the CFA.  */
      break;
    }
}
650
651 /* Determine if two dw_cfa_location structures define the same data. */
652
653 bool
654 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
655 {
656 return (loc1->reg == loc2->reg
657 && loc1->offset == loc2->offset
658 && loc1->indirect == loc2->indirect
659 && (loc1->indirect == 0
660 || loc1->base_offset == loc2->base_offset));
661 }
662
/* Determine if two CFI operands are identical.  T selects which member
   of the operand union is meaningful.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      /* Unused operands always compare equal.  */
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* NOTE(review): assumes both address strings are non-null whenever
	 the pointers differ — confirm at the producers of this operand.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
684
685 /* Determine if two CFI entries are identical. */
686
687 static bool
688 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
689 {
690 enum dwarf_call_frame_info opc;
691
692 /* Make things easier for our callers, including missing operands. */
693 if (a == b)
694 return true;
695 if (a == NULL || b == NULL)
696 return false;
697
698 /* Obviously, the opcodes must match. */
699 opc = a->dw_cfi_opc;
700 if (opc != b->dw_cfi_opc)
701 return false;
702
703 /* Compare the two operands, re-using the type of the operands as
704 already exposed elsewhere. */
705 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
706 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
707 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
708 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
709 }
710
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  /* Compare the CFA via the expression CFI when A has one, otherwise by
     the reg/offset form.  NOTE(review): when only B carries a cfa_cfi
     the expression is never consulted — confirm callers keep the cfa
     member in sync with cfa_cfi.  */
  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  /* Compare reg_save element-wise; a missing tail entry is treated the
     same as a null (unsaved) entry.  */
  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
745
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  The cases below are ordered from the
   cheapest encoding to the fully general expression form.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
812
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the scratch cfa_store tracking in sync when it is based on the
     same register as the CFA.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Remember the expression CFI only when that is how the CFA is now
	 defined; the reg/offset form is fully captured by cur_row->cfa.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
833
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      /* The save slot is addressed off the realigned frame, which a
	 simple CFA-relative offset cannot express.  */
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Pick the smallest opcode that can encode this offset/column.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  /* Keep the abstract row in sync with what was just emitted.  */
  update_row_reg_save (cur_row, reg, cfi);
}
884
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  /* Nothing to track if the argument area size is unchanged.  */
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
919
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn in the trace: anchor the delayed args_size here.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
946
947 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
948 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
949 used in places where rtl is prohibited. */
950
951 static inline unsigned
952 dwf_regno (const_rtx reg)
953 {
954 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
955 return DWARF_FRAME_REGNUM (REGNO (reg));
956 }
957
958 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
959
960 static bool
961 compare_reg_or_pc (rtx x, rtx y)
962 {
963 if (REG_P (x) && REG_P (y))
964 return REGNO (x) == REGNO (y);
965 return x == y;
966 }
967
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update or remove it in place.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Deleting an entry that does not exist is a no-op.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
993
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
	/* Overwrite the earlier save of the same register in place.  */
	*q = e;
	return;
      }

  queued_reg_saves.safe_push (e);
}
1015
/* Output all the entries in QUEUED_REG_SAVES.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      /* Mirror the save into the regs-saved-in-regs tracking (a null
	 saved_reg removes any stale entry for this register).  */
      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return-address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
1043
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      /* A queued register that INSN modifies must be flushed before the
	 modification takes effect.  */
      if (modified_in_p (q->reg, insn))
	return true;

      /* Likewise if INSN modifies the register that currently holds a
	 queued register's saved value.  */
      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
1071
/* What register, if any, is currently saved in REG?  Returns NULL_RTX
   when REG holds no saved register.  */

static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  /* Check pending (queued) register-to-register saves first.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  /* Then the saves already recorded for this trace.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  return NULL_RTX;
}
1092
1093 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1094
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* Start from a clean description; PAT fully specifies the new CFA.  */
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* (plus <base> <const>): peel off a constant displacement.  */
  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  /* (mem <addr>): the CFA value lives in memory; record the address,
     which may itself be reg + const.  */
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
1120
1121 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1122
1123 static void
1124 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1125 {
1126 rtx src, dest;
1127
1128 gcc_assert (GET_CODE (pat) == SET);
1129 dest = XEXP (pat, 0);
1130 src = XEXP (pat, 1);
1131
1132 switch (GET_CODE (src))
1133 {
1134 case PLUS:
1135 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1136 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1137 break;
1138
1139 case REG:
1140 break;
1141
1142 default:
1143 gcc_unreachable ();
1144 }
1145
1146 cur_cfa->reg = dwf_regno (dest);
1147 gcc_assert (cur_cfa->indirect == 0);
1148 }
1149
1150 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1151
1152 static void
1153 dwarf2out_frame_debug_cfa_offset (rtx set)
1154 {
1155 HOST_WIDE_INT offset;
1156 rtx src, addr, span;
1157 unsigned int sregno;
1158
1159 src = XEXP (set, 1);
1160 addr = XEXP (set, 0);
1161 gcc_assert (MEM_P (addr));
1162 addr = XEXP (addr, 0);
1163
1164 /* As documented, only consider extremely simple addresses. */
1165 switch (GET_CODE (addr))
1166 {
1167 case REG:
1168 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1169 offset = -cur_cfa->offset;
1170 break;
1171 case PLUS:
1172 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1173 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1174 break;
1175 default:
1176 gcc_unreachable ();
1177 }
1178
1179 if (src == pc_rtx)
1180 {
1181 span = NULL;
1182 sregno = DWARF_FRAME_RETURN_COLUMN;
1183 }
1184 else
1185 {
1186 span = targetm.dwarf_register_span (src);
1187 sregno = dwf_regno (src);
1188 }
1189
1190 /* ??? We'd like to use queue_reg_save, but we need to come up with
1191 a different flushing heuristic for epilogues. */
1192 if (!span)
1193 reg_save (sregno, INVALID_REGNUM, offset);
1194 else
1195 {
1196 /* We have a PARALLEL describing where the contents of SRC live.
1197 Adjust the offset for each piece of the PARALLEL. */
1198 HOST_WIDE_INT span_offset = offset;
1199
1200 gcc_assert (GET_CODE (span) == PARALLEL);
1201
1202 const int par_len = XVECLEN (span, 0);
1203 for (int par_index = 0; par_index < par_len; par_index++)
1204 {
1205 rtx elem = XVECEXP (span, 0, par_index);
1206 sregno = dwf_regno (src);
1207 reg_save (sregno, INVALID_REGNUM, span_offset);
1208 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1209 }
1210 }
1211 }
1212
1213 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1214
static void
dwarf2out_frame_debug_cfa_register (rtx set)
{
  rtx src, dest;
  unsigned sregno, dregno;

  src = XEXP (set, 1);
  dest = XEXP (set, 0);

  /* Remember the reg-to-reg save so that later stores of DEST can be
     attributed back to SRC (see reg_saved_in).  */
  record_reg_saved_in_reg (dest, src);
  if (src == pc_rtx)
    /* The PC stands in for the return address column.  */
    sregno = DWARF_FRAME_RETURN_COLUMN;
  else
    sregno = dwf_regno (src);

  dregno = dwf_regno (dest);

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  reg_save (sregno, dregno, 0);
}
1236
1237 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1238
static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-part register spans are not supported for expression saves.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Describe the save location with a full DWARF location expression
     built from DEST's address.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1268
1269 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1270 note. */
1271
static void
dwarf2out_frame_debug_cfa_val_expression (rtx set)
{
  rtx dest = SET_DEST (set);
  gcc_assert (REG_P (dest));

  /* Multi-part register spans are not supported here.  */
  rtx span = targetm.dwarf_register_span (dest);
  gcc_assert (!span);

  /* Unlike DW_CFA_expression, the expression gives the register's
     VALUE directly rather than the address where it is saved.  */
  rtx src = SET_SRC (set);
  dw_cfi_ref cfi = new_cfi ();
  cfi->dw_cfi_opc = DW_CFA_val_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (src, GET_MODE (src),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
  add_cfi (cfi);
  update_row_reg_save (cur_row, dwf_regno (dest), cfi);
}
1291
1292 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1293
static void
dwarf2out_frame_debug_cfa_restore (rtx reg)
{
  gcc_assert (REG_P (reg));

  rtx span = targetm.dwarf_register_span (reg);
  if (!span)
    {
      /* Simple case: one DWARF column to restore.  */
      unsigned int regno = dwf_regno (reg);
      add_cfi_restore (regno);
      update_row_reg_save (cur_row, regno, NULL);
    }
  else
    {
      /* We have a PARALLEL describing where the contents of REG live.
	 Restore the register for each piece of the PARALLEL.  */
      gcc_assert (GET_CODE (span) == PARALLEL);

      const int par_len = XVECLEN (span, 0);
      for (int par_index = 0; par_index < par_len; par_index++)
	{
	  reg = XVECEXP (span, 0, par_index);
	  gcc_assert (REG_P (reg));
	  unsigned int regno = dwf_regno (reg);
	  add_cfi_restore (regno);
	  update_row_reg_save (cur_row, regno, NULL);
	}
    }
}
1323
1324 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1325 ??? Perhaps we should note in the CIE where windows are saved (instead of
1326 assuming 0(cfa)) and what registers are in the window. */
1327
1328 static void
1329 dwarf2out_frame_debug_cfa_window_save (void)
1330 {
1331 dw_cfi_ref cfi = new_cfi ();
1332
1333 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1334 add_cfi (cfi);
1335 }
1336
1337 /* Record call frame debugging information for an expression EXPR,
1338 which either sets SP or FP (adjusting how we calculate the frame
1339 address) or saves a register to the stack or another register.
1340 LABEL indicates the address of EXPR.
1341
1342 This function encodes a state machine mapping rtxes to actions on
1343 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1344 users need not read the source code.
1345
1346 The High-Level Picture
1347
1348 Changes in the register we use to calculate the CFA: Currently we
1349 assume that if you copy the CFA register into another register, we
1350 should take the other one as the new CFA register; this seems to
1351 work pretty well. If it's wrong for some target, it's simple
1352 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1353
1354 Changes in the register we use for saving registers to the stack:
1355 This is usually SP, but not always. Again, we deduce that if you
1356 copy SP into another register (and SP is not the CFA register),
1357 then the new register is the one we will be using for register
1358 saves. This also seems to work.
1359
1360 Register saves: There's not much guesswork about this one; if
1361 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1362 register save, and the register used to calculate the destination
1363 had better be the one we think we're using for this purpose.
1364 It's also assumed that a copy from a call-saved register to another
1365 register is saving that register if RTX_FRAME_RELATED_P is set on
1366 that instruction. If the copy is from a call-saved register to
1367 the *same* register, that means that the register is now the same
1368 value as in the caller.
1369
1370 Except: If the register being saved is the CFA register, and the
1371 offset is nonzero, we are saving the CFA, so we assume we have to
1372 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1373 the intent is to save the value of SP from the previous frame.
1374
   In addition, if a register has previously been saved to a different
   register, a later store of that other register is treated as a save
   of the original register (see reg_saved_in).
1377
1378 Invariants / Summaries of Rules
1379
1380 cfa current rule for calculating the CFA. It usually
1381 consists of a register and an offset. This is
1382 actually stored in *cur_cfa, but abbreviated
1383 for the purposes of this documentation.
1384 cfa_store register used by prologue code to save things to the stack
1385 cfa_store.offset is the offset from the value of
1386 cfa_store.reg to the actual CFA
1387 cfa_temp register holding an integral value. cfa_temp.offset
1388 stores the value, which will be used to adjust the
1389 stack pointer. cfa_temp is also used like cfa_store,
1390 to track stores to the stack via fp or a temp reg.
1391
1392 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1393 with cfa.reg as the first operand changes the cfa.reg and its
1394 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1395 cfa_temp.offset.
1396
1397 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1398 expression yielding a constant. This sets cfa_temp.reg
1399 and cfa_temp.offset.
1400
1401 Rule 5: Create a new register cfa_store used to save items to the
1402 stack.
1403
1404 Rules 10-14: Save a register to the stack. Define offset as the
1405 difference of the original location and cfa_store's
1406 location (or cfa_temp's location if cfa_temp is used).
1407
1408 Rules 16-20: If AND operation happens on sp in prologue, we assume
1409 stack is realigned. We will use a group of DW_OP_XXX
1410 expressions to represent the location of the stored
1411 register instead of CFA+offset.
1412
1413 The Rules
1414
1415 "{a,b}" indicates a choice of a xor b.
1416 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1417
1418 Rule 1:
1419 (set <reg1> <reg2>:cfa.reg)
1420 effects: cfa.reg = <reg1>
1421 cfa.offset unchanged
1422 cfa_temp.reg = <reg1>
1423 cfa_temp.offset = cfa.offset
1424
1425 Rule 2:
1426 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1427 {<const_int>,<reg>:cfa_temp.reg}))
1428 effects: cfa.reg = sp if fp used
1429 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1430 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1431 if cfa_store.reg==sp
1432
1433 Rule 3:
1434 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1435 effects: cfa.reg = fp
1436 cfa_offset += +/- <const_int>
1437
1438 Rule 4:
1439 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1440 constraints: <reg1> != fp
1441 <reg1> != sp
1442 effects: cfa.reg = <reg1>
1443 cfa_temp.reg = <reg1>
1444 cfa_temp.offset = cfa.offset
1445
1446 Rule 5:
1447 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1448 constraints: <reg1> != fp
1449 <reg1> != sp
1450 effects: cfa_store.reg = <reg1>
1451 cfa_store.offset = cfa.offset - cfa_temp.offset
1452
1453 Rule 6:
1454 (set <reg> <const_int>)
1455 effects: cfa_temp.reg = <reg>
1456 cfa_temp.offset = <const_int>
1457
1458 Rule 7:
1459 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1460 effects: cfa_temp.reg = <reg1>
1461 cfa_temp.offset |= <const_int>
1462
1463 Rule 8:
1464 (set <reg> (high <exp>))
1465 effects: none
1466
1467 Rule 9:
1468 (set <reg> (lo_sum <exp> <const_int>))
1469 effects: cfa_temp.reg = <reg>
1470 cfa_temp.offset = <const_int>
1471
1472 Rule 10:
1473 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1474 effects: cfa_store.offset -= <const_int>
1475 cfa.offset = cfa_store.offset if cfa.reg == sp
1476 cfa.reg = sp
1477 cfa.base_offset = -cfa_store.offset
1478
1479 Rule 11:
1480 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1481 effects: cfa_store.offset += -/+ mode_size(mem)
1482 cfa.offset = cfa_store.offset if cfa.reg == sp
1483 cfa.reg = sp
1484 cfa.base_offset = -cfa_store.offset
1485
1486 Rule 12:
1487 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1488
1489 <reg2>)
1490 effects: cfa.reg = <reg1>
1491 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1492
1493 Rule 13:
1494 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1495 effects: cfa.reg = <reg1>
1496 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1497
1498 Rule 14:
1499 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1500 effects: cfa.reg = <reg1>
1501 cfa.base_offset = -cfa_temp.offset
1502 cfa_temp.offset -= mode_size(mem)
1503
1504 Rule 15:
1505 (set <reg> {unspec, unspec_volatile})
1506 effects: target-dependent
1507
1508 Rule 16:
1509 (set sp (and: sp <const_int>))
1510 constraints: cfa_store.reg == sp
1511 effects: cfun->fde.stack_realign = 1
1512 cfa_store.offset = 0
1513 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1514
1515 Rule 17:
1516 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1517 effects: cfa_store.offset += -/+ mode_size(mem)
1518
1519 Rule 18:
1520 (set (mem ({pre_inc, pre_dec} sp)) fp)
1521 constraints: fde->stack_realign == 1
1522 effects: cfa_store.offset = 0
1523 cfa.reg != HARD_FRAME_POINTER_REGNUM
1524
1525 Rule 19:
1526 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1527 constraints: fde->stack_realign == 1
1528 && cfa.offset == 0
1529 && cfa.indirect == 0
1530 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1531 effects: Use DW_CFA_def_cfa_expression to define cfa
1532 cfa.reg == fde->drap_reg */
1533
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      /* Second pass: everything that is not a store to memory.  */
      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC is currently saved in another register, substitute the
     original register (see reg_saved_in).  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cur_trace->cfa_temp.reg = dwf_regno (dest);
		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Convert the base-relative offset into a CFA-relative one,
	       depending on which base register the address uses.  */
	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cur_cfa->offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2002
2003 /* Record call frame debugging information for INSN, which either sets
2004 SP or FP (adjusting how we calculate the frame address) or saves a
2005 register to the stack. */
2006
2007 static void
2008 dwarf2out_frame_debug (rtx_insn *insn)
2009 {
2010 rtx note, n, pat;
2011 bool handled_one = false;
2012
2013 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2014 switch (REG_NOTE_KIND (note))
2015 {
2016 case REG_FRAME_RELATED_EXPR:
2017 pat = XEXP (note, 0);
2018 goto do_frame_expr;
2019
2020 case REG_CFA_DEF_CFA:
2021 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2022 handled_one = true;
2023 break;
2024
2025 case REG_CFA_ADJUST_CFA:
2026 n = XEXP (note, 0);
2027 if (n == NULL)
2028 {
2029 n = PATTERN (insn);
2030 if (GET_CODE (n) == PARALLEL)
2031 n = XVECEXP (n, 0, 0);
2032 }
2033 dwarf2out_frame_debug_adjust_cfa (n);
2034 handled_one = true;
2035 break;
2036
2037 case REG_CFA_OFFSET:
2038 n = XEXP (note, 0);
2039 if (n == NULL)
2040 n = single_set (insn);
2041 dwarf2out_frame_debug_cfa_offset (n);
2042 handled_one = true;
2043 break;
2044
2045 case REG_CFA_REGISTER:
2046 n = XEXP (note, 0);
2047 if (n == NULL)
2048 {
2049 n = PATTERN (insn);
2050 if (GET_CODE (n) == PARALLEL)
2051 n = XVECEXP (n, 0, 0);
2052 }
2053 dwarf2out_frame_debug_cfa_register (n);
2054 handled_one = true;
2055 break;
2056
2057 case REG_CFA_EXPRESSION:
2058 case REG_CFA_VAL_EXPRESSION:
2059 n = XEXP (note, 0);
2060 if (n == NULL)
2061 n = single_set (insn);
2062
2063 if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
2064 dwarf2out_frame_debug_cfa_expression (n);
2065 else
2066 dwarf2out_frame_debug_cfa_val_expression (n);
2067
2068 handled_one = true;
2069 break;
2070
2071 case REG_CFA_RESTORE:
2072 n = XEXP (note, 0);
2073 if (n == NULL)
2074 {
2075 n = PATTERN (insn);
2076 if (GET_CODE (n) == PARALLEL)
2077 n = XVECEXP (n, 0, 0);
2078 n = XEXP (n, 0);
2079 }
2080 dwarf2out_frame_debug_cfa_restore (n);
2081 handled_one = true;
2082 break;
2083
2084 case REG_CFA_SET_VDRAP:
2085 n = XEXP (note, 0);
2086 if (REG_P (n))
2087 {
2088 dw_fde_ref fde = cfun->fde;
2089 if (fde)
2090 {
2091 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2092 if (REG_P (n))
2093 fde->vdrap_reg = dwf_regno (n);
2094 }
2095 }
2096 handled_one = true;
2097 break;
2098
2099 case REG_CFA_TOGGLE_RA_MANGLE:
2100 case REG_CFA_WINDOW_SAVE:
2101 /* We overload both of these operations onto the same DWARF opcode. */
2102 dwarf2out_frame_debug_cfa_window_save ();
2103 handled_one = true;
2104 break;
2105
2106 case REG_CFA_FLUSH_QUEUE:
2107 /* The actual flush happens elsewhere. */
2108 handled_one = true;
2109 break;
2110
2111 default:
2112 break;
2113 }
2114
2115 if (!handled_one)
2116 {
2117 pat = PATTERN (insn);
2118 do_frame_expr:
2119 dwarf2out_frame_debug_expr (pat);
2120
2121 /* Check again. A parallel can save and update the same register.
2122 We could probably check just once, here, but this is safer than
2123 removing the check at the start of the function. */
2124 if (clobbers_queued_reg_save (pat))
2125 dwarf2out_flush_queued_reg_saves ();
2126 }
2127 }
2128
2129 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2130
2131 static void
2132 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2133 {
2134 size_t i, n_old, n_new, n_max;
2135 dw_cfi_ref cfi;
2136
2137 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2138 add_cfi (new_row->cfa_cfi);
2139 else
2140 {
2141 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2142 if (cfi)
2143 add_cfi (cfi);
2144 }
2145
2146 n_old = vec_safe_length (old_row->reg_save);
2147 n_new = vec_safe_length (new_row->reg_save);
2148 n_max = MAX (n_old, n_new);
2149
2150 for (i = 0; i < n_max; ++i)
2151 {
2152 dw_cfi_ref r_old = NULL, r_new = NULL;
2153
2154 if (i < n_old)
2155 r_old = (*old_row->reg_save)[i];
2156 if (i < n_new)
2157 r_new = (*new_row->reg_save)[i];
2158
2159 if (r_old == r_new)
2160 ;
2161 else if (r_new == NULL)
2162 add_cfi_restore (i);
2163 else if (!cfi_equal_p (r_old, r_new))
2164 add_cfi (r_new);
2165 }
2166 }
2167
2168 /* Examine CFI and return true if a cfi label and set_loc is needed
2169 beforehand. Even when generating CFI assembler instructions, we
2170 still have to add the cfi to the list so that lookup_cfa_1 works
2171 later on. When -g2 and above we even need to force emitting of
2172 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2173 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2174 and so don't use convert_cfa_to_fb_loc_list. */
2175
2176 static bool
2177 cfi_label_required_p (dw_cfi_ref cfi)
2178 {
2179 if (!dwarf2out_do_cfi_asm ())
2180 return true;
2181
2182 if (dwarf_version == 2
2183 && debug_info_level > DINFO_LEVEL_TERSE
2184 && (write_symbols == DWARF2_DEBUG
2185 || write_symbols == VMS_AND_DWARF2_DEBUG))
2186 {
2187 switch (cfi->dw_cfi_opc)
2188 {
2189 case DW_CFA_def_cfa_offset:
2190 case DW_CFA_def_cfa_offset_sf:
2191 case DW_CFA_def_cfa_register:
2192 case DW_CFA_def_cfa:
2193 case DW_CFA_def_cfa_sf:
2194 case DW_CFA_def_cfa_expression:
2195 case DW_CFA_restore_state:
2196 return true;
2197 default:
2198 return false;
2199 }
2200 }
2201 return false;
2202 }
2203
2204 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2205 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2206 necessary. */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;
  /* We always start with a function_begin label.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Remember where the second-section CFIs begin within the
	     FDE's cfi vector.  */
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  /* Scan forward over the whole run of consecutive CFI notes
	     (skipping non-active insns), leaving NEXT at the first
	     active insn or section switch after the run.  A single
	     label covers the entire run.  */
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  Use set_loc
		 rather than advance_loc4 for the first CFI after a
		 section switch (see above).  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      /* Emit the label itself as a note so final () outputs it.  */
	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI in the run, in order, to the FDE.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2270
2271 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2272
2273 /* If LABEL is the start of a trace, then initialize the state of that
2274 trace from CUR_TRACE and CUR_ROW. */
2275
static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  /* START must be the head of some trace; ORIGIN (possibly NULL for a
     fallthru edge) is only used for dump output.  */
  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  /* Dump both rows before aborting, to make the mismatch
	     diagnosable.  */
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2337
2338 /* Similarly, but handle the args_size and CFA reset across EH
2339 and non-local goto edges. */
2340
2341 static void
2342 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2343 {
2344 HOST_WIDE_INT save_args_size, delta;
2345 dw_cfa_location save_cfa;
2346
2347 save_args_size = cur_trace->end_true_args_size;
2348 if (save_args_size == 0)
2349 {
2350 maybe_record_trace_start (start, origin);
2351 return;
2352 }
2353
2354 delta = -save_args_size;
2355 cur_trace->end_true_args_size = 0;
2356
2357 save_cfa = cur_row->cfa;
2358 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2359 {
2360 /* Convert a change in args_size (always a positive in the
2361 direction of stack growth) to a change in stack pointer. */
2362 if (!STACK_GROWS_DOWNWARD)
2363 delta = -delta;
2364
2365 cur_row->cfa.offset += delta;
2366 }
2367
2368 maybe_record_trace_start (start, origin);
2369
2370 cur_trace->end_true_args_size = save_args_size;
2371 cur_row->cfa = save_cfa;
2372 }
2373
2374 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2375 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2376
static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled below via the CALL that performs
	 them, not via the jump itself.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge per entry in the jump table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may reach any label whose address was
	     taken.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Ordinary direct jump.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot SEQUENCE: recurse on each element; the EH check
	 below happens within the recursion, hence the early return.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2456
2457 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2458
2459 static void
2460 scan_insn_after (rtx_insn *insn)
2461 {
2462 if (RTX_FRAME_RELATED_P (insn))
2463 dwarf2out_frame_debug (insn);
2464 notice_args_size (insn);
2465 }
2466
2467 /* Scan the trace beginning at INSN and create the CFI notes for the
2468 instructions therein. */
2469
static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* Scanning mutates a copy of the incoming row; beg_row is kept
     intact for the consistency checks in maybe_record_trace_start.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  /* Publish this trace's state in the pass-globals used by the
     subroutines; they are cleared again at the end of this function.  */
  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  /* Element 0 of the SEQUENCE is the branch/call insn itself.  */
	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  Scan ELT with note emission
		     suppressed (add_cfi_insn = NULL), record the edge,
		     then restore the fallthru-path state.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the globals published above.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2646
2647 /* Scan the function and create the initial set of CFI notes. */
2648
2649 static void
2650 create_cfi_notes (void)
2651 {
2652 dw_trace_info *ti;
2653
2654 gcc_checking_assert (!queued_reg_saves.exists ());
2655 gcc_checking_assert (!trace_work_list.exists ());
2656
2657 /* Always begin at the entry trace. */
2658 ti = &trace_info[0];
2659 scan_trace (ti);
2660
2661 while (!trace_work_list.is_empty ())
2662 {
2663 ti = trace_work_list.pop ();
2664 scan_trace (ti);
2665 }
2666
2667 queued_reg_saves.release ();
2668 trace_work_list.release ();
2669 }
2670
2671 /* Return the insn before the first NOTE_INSN_CFI after START. */
2672
2673 static rtx_insn *
2674 before_next_cfi_note (rtx_insn *start)
2675 {
2676 rtx_insn *prev = start;
2677 while (start)
2678 {
2679 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2680 return prev;
2681 prev = start;
2682 start = NEXT_INSN (start);
2683 }
2684 gcc_unreachable ();
2685 }
2686
2687 /* Insert CFI notes between traces to properly change state between them. */
2688
static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  Walk backwards so
     ordered_remove does not disturb the indices still to visit.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* The restore brings us back to the remembered state, so
		 only the delta from prev_ti->beg_row remains to emit.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2813
2814 /* Set up the pseudo-cfg of instruction traces, as described at the
2815 block comment at the top of the file. */
2816
2817 static void
2818 create_pseudo_cfg (void)
2819 {
2820 bool saw_barrier, switch_sections;
2821 dw_trace_info ti;
2822 rtx_insn *insn;
2823 unsigned i;
2824
2825 /* The first trace begins at the start of the function,
2826 and begins with the CIE row state. */
2827 trace_info.create (16);
2828 memset (&ti, 0, sizeof (ti));
2829 ti.head = get_insns ();
2830 ti.beg_row = cie_cfi_row;
2831 ti.cfa_store = cie_cfi_row->cfa;
2832 ti.cfa_temp.reg = INVALID_REGNUM;
2833 trace_info.quick_push (ti);
2834
2835 if (cie_return_save)
2836 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2837
2838 /* Walk all the insns, collecting start of trace locations. */
2839 saw_barrier = false;
2840 switch_sections = false;
2841 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2842 {
2843 if (BARRIER_P (insn))
2844 saw_barrier = true;
2845 else if (NOTE_P (insn)
2846 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2847 {
2848 /* We should have just seen a barrier. */
2849 gcc_assert (saw_barrier);
2850 switch_sections = true;
2851 }
2852 /* Watch out for save_point notes between basic blocks.
2853 In particular, a note after a barrier. Do not record these,
2854 delaying trace creation until the label. */
2855 else if (save_point_p (insn)
2856 && (LABEL_P (insn) || !saw_barrier))
2857 {
2858 memset (&ti, 0, sizeof (ti));
2859 ti.head = insn;
2860 ti.switch_sections = switch_sections;
2861 ti.id = trace_info.length ();
2862 trace_info.safe_push (ti);
2863
2864 saw_barrier = false;
2865 switch_sections = false;
2866 }
2867 }
2868
2869 /* Create the trace index after we've finished building trace_info,
2870 avoiding stale pointer problems due to reallocation. */
2871 trace_index
2872 = new hash_table<trace_info_hasher> (trace_info.length ());
2873 dw_trace_info *tp;
2874 FOR_EACH_VEC_ELT (trace_info, i, tp)
2875 {
2876 dw_trace_info **slot;
2877
2878 if (dump_file)
2879 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2880 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2881 tp->switch_sections ? " (section switch)" : "");
2882
2883 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2884 gcc_assert (*slot == NULL);
2885 *slot = tp;
2886 }
2887 }
2888
2889 /* Record the initial position of the return address. RTL is
2890 INCOMING_RETURN_ADDR_RTX. */
2891
static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  The address must be sp-relative:
	 plain (sp), (plus sp const) or (minus sp const).  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  /* If RA doesn't already live in the DWARF return column, record where
     it is.  REG stays INVALID_REGNUM for the MEM (on-stack) case.  */
  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2950
/* Compute the CIE row state shared by every function: the incoming CFA
   and the initial location of the return address.  Populates the
   cie_cfi_vec / cie_cfi_row / cie_return_save globals.  */
static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Route the CFI helpers at a scratch trace and the CIE vector while
     we build the CIE row; undone at the end of this function.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Restore the globals redirected above.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
3002
3003 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3004 state at each location within the function. These notes will be
3005 emitted during pass_final. */
3006
3007 static unsigned int
3008 execute_dwarf2_frame (void)
3009 {
3010 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
3011 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
3012
3013 /* The first time we're called, compute the incoming frame state. */
3014 if (cie_cfi_vec == NULL)
3015 create_cie_data ();
3016
3017 dwarf2out_alloc_current_fde ();
3018
3019 create_pseudo_cfg ();
3020
3021 /* Do the work. */
3022 create_cfi_notes ();
3023 connect_traces ();
3024 add_cfis_to_fde ();
3025
3026 /* Free all the data we allocated. */
3027 {
3028 size_t i;
3029 dw_trace_info *ti;
3030
3031 FOR_EACH_VEC_ELT (trace_info, i, ti)
3032 ti->regs_saved_in_regs.release ();
3033 }
3034 trace_info.release ();
3035
3036 delete trace_index;
3037 trace_index = NULL;
3038
3039 return 0;
3040 }
3041 \f
3042 /* Convert a DWARF call frame info. operation to its string name */
3043
3044 static const char *
3045 dwarf_cfi_name (unsigned int cfi_opc)
3046 {
3047 const char *name = get_DW_CFA_name (cfi_opc);
3048
3049 if (name != NULL)
3050 return name;
3051
3052 return "DW_CFA_<unknown>";
3053 }
3054
3055 /* This routine will generate the correct assembly data for a location
3056 description based on a cfi entry with a complex address. */
3057
3058 static void
3059 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3060 {
3061 dw_loc_descr_ref loc;
3062 unsigned long size;
3063
3064 if (cfi->dw_cfi_opc == DW_CFA_expression
3065 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3066 {
3067 unsigned r =
3068 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3069 dw2_asm_output_data (1, r, NULL);
3070 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3071 }
3072 else
3073 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3074
3075 /* Output the size of the block. */
3076 size = size_of_locs (loc);
3077 dw2_asm_output_data_uleb128 (size, NULL);
3078
3079 /* Now output the operations themselves. */
3080 output_loc_sequence (loc, for_eh);
3081 }
3082
3083 /* Similar, but used for .cfi_escape. */
3084
3085 static void
3086 output_cfa_loc_raw (dw_cfi_ref cfi)
3087 {
3088 dw_loc_descr_ref loc;
3089 unsigned long size;
3090
3091 if (cfi->dw_cfi_opc == DW_CFA_expression
3092 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3093 {
3094 unsigned r =
3095 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3096 fprintf (asm_out_file, "%#x,", r);
3097 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3098 }
3099 else
3100 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3101
3102 /* Output the size of the block. */
3103 size = size_of_locs (loc);
3104 dw2_asm_output_data_uleb128_raw (size);
3105 fputc (',', asm_out_file);
3106
3107 /* Now output the operations themselves. */
3108 output_loc_sequence_raw (loc);
3109 }
3110
3111 /* Output a Call Frame Information opcode and its operand(s). */
3112
void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore embed their
     first operand in the low 6 bits of the opcode byte.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* All remaining opcodes take a full opcode byte followed by
	 opcode-specific operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_loc variants emit a delta from the FDE's current
	   label, then advance that label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These four share the single-register-operand form.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3252
/* Similar to output_cfi, but emit the CFI in the form of assembler
   directives (e.g. ".cfi_offset").  F is either asm_out_file, in which
   case real directives are emitted, or a debug dump file, in which case
   a human-readable approximation is printed.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* Map the internal register number to the output (for_eh) numbering
	 before printing; the assembler encodes the offset itself.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      /* Operand 1 is the register whose save location is being described,
	 operand 2 the register it is saved in.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* The assembler has no dedicated directive for this opcode, so for
	 real assembly output emit it as a raw .cfi_escape byte sequence;
	 for debug dumps print a symbolic pseudo-directive instead.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      if (f != asm_out_file)
	{
	  /* NOTE(review): for plain DW_CFA_expression this prints
	     ".cfi_cfa_expression" rather than ".cfi_expression"; this is
	     debug-dump output only, never real assembly.  */
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      /* Expressions likewise lack a gas directive; escape the raw bytes.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3372
3373 void
3374 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3375 {
3376 if (dwarf2out_do_cfi_asm ())
3377 output_cfi_directive (asm_out_file, cfi);
3378 }
3379
3380 static void
3381 dump_cfi_row (FILE *f, dw_cfi_row *row)
3382 {
3383 dw_cfi_ref cfi;
3384 unsigned i;
3385
3386 cfi = row->cfa_cfi;
3387 if (!cfi)
3388 {
3389 dw_cfa_location dummy;
3390 memset (&dummy, 0, sizeof (dummy));
3391 dummy.reg = INVALID_REGNUM;
3392 cfi = def_cfa_0 (&dummy, &row->cfa);
3393 }
3394 output_cfi_directive (f, cfi);
3395
3396 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3397 if (cfi)
3398 output_cfi_directive (f, cfi);
3399 }
3400
/* Prototype for the debugger-callable helper below, so it is not
   diagnosed as missing a declaration.  */
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr; intended to be called by hand from a debugger.  */
void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3408 \f
3409
/* Cache the result of dwarf2out_do_cfi_asm across PCH (it is computed and
   stored by that function, and also consulted by dwarf2out_do_frame).
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3413
3414 /* Decide whether we want to emit frame unwind information for the current
3415 translation unit. */
3416
3417 bool
3418 dwarf2out_do_frame (void)
3419 {
3420 /* We want to emit correct CFA location expressions or lists, so we
3421 have to return true if we're going to output debug info, even if
3422 we're not going to output frame or unwind info. */
3423 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3424 return true;
3425
3426 if (saved_do_cfi_asm > 0)
3427 return true;
3428
3429 if (targetm.debug_unwind_info () == UI_DWARF2)
3430 return true;
3431
3432 if ((flag_unwind_tables || flag_exceptions)
3433 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3434 return true;
3435
3436 return false;
3437 }
3438
3439 /* Decide whether to emit frame unwind via assembler directives. */
3440
3441 bool
3442 dwarf2out_do_cfi_asm (void)
3443 {
3444 int enc;
3445
3446 if (saved_do_cfi_asm != 0)
3447 return saved_do_cfi_asm > 0;
3448
3449 /* Assume failure for a moment. */
3450 saved_do_cfi_asm = -1;
3451
3452 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3453 return false;
3454 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3455 return false;
3456
3457 /* Make sure the personality encoding is one the assembler can support.
3458 In particular, aligned addresses can't be handled. */
3459 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3460 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3461 return false;
3462 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3463 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3464 return false;
3465
3466 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3467 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3468 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3469 && !flag_unwind_tables && !flag_exceptions
3470 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3471 return false;
3472
3473 /* Success! */
3474 saved_do_cfi_asm = 1;
3475 return true;
3476 }
3477
3478 namespace {
3479
3480 const pass_data pass_data_dwarf2_frame =
3481 {
3482 RTL_PASS, /* type */
3483 "dwarf2", /* name */
3484 OPTGROUP_NONE, /* optinfo_flags */
3485 TV_FINAL, /* tv_id */
3486 0, /* properties_required */
3487 0, /* properties_provided */
3488 0, /* properties_destroyed */
3489 0, /* todo_flags_start */
3490 0, /* todo_flags_finish */
3491 };
3492
3493 class pass_dwarf2_frame : public rtl_opt_pass
3494 {
3495 public:
3496 pass_dwarf2_frame (gcc::context *ctxt)
3497 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3498 {}
3499
3500 /* opt_pass methods: */
3501 virtual bool gate (function *);
3502 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3503
3504 }; // class pass_dwarf2_frame
3505
3506 bool
3507 pass_dwarf2_frame::gate (function *)
3508 {
3509 /* Targets which still implement the prologue in assembler text
3510 cannot use the generic dwarf2 unwinding. */
3511 if (!targetm.have_prologue ())
3512 return false;
3513
3514 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3515 from the optimized shrink-wrapping annotations that we will compute.
3516 For now, only produce the CFI notes for dwarf2. */
3517 return dwarf2out_do_frame ();
3518 }
3519
3520 } // anon namespace
3521
/* Pass-manager factory: allocate a new instance of the dwarf2 frame pass
   within context CTXT.  The caller takes ownership of the returned pass
   object (the pass manager frees it).  */
rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3527
3528 #include "gt-dwarf2cfi.h"