thirdparty/gcc.git: gcc/cfgexpand.c (blob at commit "Force IFN_LOAD/STORE_LANES operands to be memory (PR91577)")
1 /* A pass for lowering trees to RTL.
2 Copyright (C) 2004-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "ssa.h"
33 #include "optabs.h"
34 #include "regs.h" /* For reg_renumber. */
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "cgraph.h"
38 #include "diagnostic.h"
39 #include "fold-const.h"
40 #include "varasm.h"
41 #include "stor-layout.h"
42 #include "stmt.h"
43 #include "print-tree.h"
44 #include "cfgrtl.h"
45 #include "cfganal.h"
46 #include "cfgbuild.h"
47 #include "cfgcleanup.h"
48 #include "dojump.h"
49 #include "explow.h"
50 #include "calls.h"
51 #include "expr.h"
52 #include "internal-fn.h"
53 #include "tree-eh.h"
54 #include "gimple-iterator.h"
55 #include "gimple-expr.h"
56 #include "gimple-walk.h"
57 #include "tree-cfg.h"
58 #include "tree-dfa.h"
59 #include "tree-ssa.h"
60 #include "except.h"
61 #include "gimple-pretty-print.h"
62 #include "toplev.h"
63 #include "debug.h"
64 #include "params.h"
65 #include "tree-inline.h"
66 #include "value-prof.h"
67 #include "tree-ssa-live.h"
68 #include "tree-outof-ssa.h"
69 #include "cfgloop.h"
70 #include "insn-attr.h" /* For INSN_SCHEDULING. */
71 #include "stringpool.h"
72 #include "attribs.h"
73 #include "asan.h"
74 #include "tree-ssa-address.h"
75 #include "output.h"
76 #include "builtins.h"
77
78 /* Some systems use __main in a way incompatible with its use in gcc; in these
79 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
80 give the same symbol without quotes for an alternative entry point. You
81 must define both, or neither. */
82 #ifndef NAME__MAIN
83 #define NAME__MAIN "__main"
84 #endif
85
86 /* This variable holds information helping the rewriting of SSA trees
87 into RTL. */
88 struct ssaexpand SA;
89
90 /* This variable holds the currently expanded gimple statement for purposes
91 of communicating the profile info to the builtin expanders. */
92 gimple *currently_expanding_gimple_stmt;
93
94 static rtx expand_debug_expr (tree);
95
96 static bool defer_stack_allocation (tree, bool);
97
98 static void record_alignment_for_reg_var (unsigned int);
99
100 /* Return an expression tree corresponding to the RHS of GIMPLE
101 statement STMT. */
102
103 tree
104 gimple_assign_rhs_to_tree (gimple *stmt)
105 {
106 tree t;
107 enum gimple_rhs_class grhs_class;
108
109 grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
110
111 if (grhs_class == GIMPLE_TERNARY_RHS)
112 t = build3 (gimple_assign_rhs_code (stmt),
113 TREE_TYPE (gimple_assign_lhs (stmt)),
114 gimple_assign_rhs1 (stmt),
115 gimple_assign_rhs2 (stmt),
116 gimple_assign_rhs3 (stmt));
117 else if (grhs_class == GIMPLE_BINARY_RHS)
118 t = build2 (gimple_assign_rhs_code (stmt),
119 TREE_TYPE (gimple_assign_lhs (stmt)),
120 gimple_assign_rhs1 (stmt),
121 gimple_assign_rhs2 (stmt));
122 else if (grhs_class == GIMPLE_UNARY_RHS)
123 t = build1 (gimple_assign_rhs_code (stmt),
124 TREE_TYPE (gimple_assign_lhs (stmt)),
125 gimple_assign_rhs1 (stmt));
126 else if (grhs_class == GIMPLE_SINGLE_RHS)
127 {
128 t = gimple_assign_rhs1 (stmt);
129 /* Avoid modifying this tree in place below. */
130 if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
131 && gimple_location (stmt) != EXPR_LOCATION (t))
132 || (gimple_block (stmt)
133 && currently_expanding_to_rtl
134 && EXPR_P (t)))
135 t = copy_node (t);
136 }
137 else
138 gcc_unreachable ();
139
140 if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t))
141 SET_EXPR_LOCATION (t, gimple_location (stmt));
142
143 return t;
144 }
145
146
147 #ifndef STACK_ALIGNMENT_NEEDED
148 #define STACK_ALIGNMENT_NEEDED 1
149 #endif
150
151 #define SSAVAR(x) (TREE_CODE (x) == SSA_NAME ? SSA_NAME_VAR (x) : x)
152
153 /* Choose either CUR or NEXT as the leader DECL for a partition.
154 Prefer ignored decls, to simplify debug dumps and to reduce the
155 ambiguity that arises when the same user variable ends up in multiple
156 partitions (this is less likely for compiler-introduced temps). */
157
158 static tree
159 leader_merge (tree cur, tree next)
160 {
161 if (cur == NULL || cur == next)
162 return next;
163
164 if (DECL_P (cur) && DECL_IGNORED_P (cur))
165 return cur;
166
167 if (DECL_P (next) && DECL_IGNORED_P (next))
168 return next;
169
170 return cur;
171 }
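/* For example (illustrative, not taken from the sources): if CUR is the
   user variable 'i' and NEXT is a DECL_IGNORED_P compiler temporary,
   the temporary is returned and becomes the partition leader, so the
   debug dumps never show 'i' as the name of several distinct
   partitions.  */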
172
173 /* Associate declaration T with storage space X. If T is not an
174 SSA name, this is exactly SET_DECL_RTL; otherwise make the
175 partition of T associated with X. */
176 static inline void
177 set_rtl (tree t, rtx x)
178 {
179 gcc_checking_assert (!x
180 || !(TREE_CODE (t) == SSA_NAME || is_gimple_reg (t))
181 || (use_register_for_decl (t)
182 ? (REG_P (x)
183 || (GET_CODE (x) == CONCAT
184 && (REG_P (XEXP (x, 0))
185 || SUBREG_P (XEXP (x, 0)))
186 && (REG_P (XEXP (x, 1))
187 || SUBREG_P (XEXP (x, 1))))
188 /* We need to accept PARALLELs for RESULT_DECLs
189 because of vector types with BLKmode returned
190 in multiple registers, but they are supposed
191 to be uncoalesced. */
192 || (GET_CODE (x) == PARALLEL
193 && SSAVAR (t)
194 && TREE_CODE (SSAVAR (t)) == RESULT_DECL
195 && (GET_MODE (x) == BLKmode
196 || !flag_tree_coalesce_vars)))
197 : (MEM_P (x) || x == pc_rtx
198 || (GET_CODE (x) == CONCAT
199 && MEM_P (XEXP (x, 0))
200 && MEM_P (XEXP (x, 1))))));
201 /* Check that the RTL for SSA_NAMEs and gimple-reg PARM_DECLs and
202 RESULT_DECLs has the expected mode. For memory, we accept
203 unpromoted modes, since that's what we're likely to get. For
204 PARM_DECLs and RESULT_DECLs, we'll have been called by
205 set_parm_rtl, which will give us the default def, so we don't
206 have to compute it ourselves. For RESULT_DECLs, we accept mode
207 mismatches too, as long as we have BLKmode or are not coalescing
208 across variables, so that we don't reject BLKmode PARALLELs or
209 unpromoted REGs. */
210 gcc_checking_assert (!x || x == pc_rtx || TREE_CODE (t) != SSA_NAME
211 || (SSAVAR (t)
212 && TREE_CODE (SSAVAR (t)) == RESULT_DECL
213 && (promote_ssa_mode (t, NULL) == BLKmode
214 || !flag_tree_coalesce_vars))
215 || !use_register_for_decl (t)
216 || GET_MODE (x) == promote_ssa_mode (t, NULL));
217
218 if (x)
219 {
220 bool skip = false;
221 tree cur = NULL_TREE;
222 rtx xm = x;
223
224 retry:
225 if (MEM_P (xm))
226 cur = MEM_EXPR (xm);
227 else if (REG_P (xm))
228 cur = REG_EXPR (xm);
229 else if (SUBREG_P (xm))
230 {
231 gcc_assert (subreg_lowpart_p (xm));
232 xm = SUBREG_REG (xm);
233 goto retry;
234 }
235 else if (GET_CODE (xm) == CONCAT)
236 {
237 xm = XEXP (xm, 0);
238 goto retry;
239 }
240 else if (GET_CODE (xm) == PARALLEL)
241 {
242 xm = XVECEXP (xm, 0, 0);
243 gcc_assert (GET_CODE (xm) == EXPR_LIST);
244 xm = XEXP (xm, 0);
245 goto retry;
246 }
247 else if (xm == pc_rtx)
248 skip = true;
249 else
250 gcc_unreachable ();
251
252 tree next = skip ? cur : leader_merge (cur, SSAVAR (t) ? SSAVAR (t) : t);
253
254 if (cur != next)
255 {
256 if (MEM_P (x))
257 set_mem_attributes (x,
258 next && TREE_CODE (next) == SSA_NAME
259 ? TREE_TYPE (next)
260 : next, true);
261 else
262 set_reg_attrs_for_decl_rtl (next, x);
263 }
264 }
265
266 if (TREE_CODE (t) == SSA_NAME)
267 {
268 int part = var_to_partition (SA.map, t);
269 if (part != NO_PARTITION)
270 {
271 if (SA.partition_to_pseudo[part])
272 gcc_assert (SA.partition_to_pseudo[part] == x);
273 else if (x != pc_rtx)
274 SA.partition_to_pseudo[part] = x;
275 }
276 /* For the benefit of debug information at -O0 (where
277 vartracking doesn't run) record the place also in the base
278 DECL. For PARMs and RESULTs, do so only when setting the
279 default def. */
280 if (x && x != pc_rtx && SSA_NAME_VAR (t)
281 && (VAR_P (SSA_NAME_VAR (t))
282 || SSA_NAME_IS_DEFAULT_DEF (t)))
283 {
284 tree var = SSA_NAME_VAR (t);
285 /* If we don't yet have something recorded, just record it now. */
286 if (!DECL_RTL_SET_P (var))
287 SET_DECL_RTL (var, x);
288 /* If we have it set already to "multiple places" don't
289 change this. */
290 else if (DECL_RTL (var) == pc_rtx)
291 ;
292 /* If we have something recorded and it's not the same place
293 as we want to record now, we have multiple partitions for the
294 same base variable, with different places. We can't just
295 randomly choose one, hence we have to say that we don't know.
296 This only happens with optimization, and there var-tracking
297 will figure out the right thing. */
298 else if (DECL_RTL (var) != x)
299 SET_DECL_RTL (var, pc_rtx);
300 }
301 }
302 else
303 SET_DECL_RTL (t, x);
304 }
305
306 /* This structure holds data relevant to one variable that will be
307 placed in a stack slot. */
308 class stack_var
309 {
310 public:
312 /* The variable. */
312 tree decl;
313
314 /* Initially, the size of the variable. Later, the size of the partition,
315 if this variable becomes its partition's representative. */
316 poly_uint64 size;
317
318 /* The *byte* alignment required for this variable. Or, as with the
319 size, the alignment for this partition. */
320 unsigned int alignb;
321
322 /* The partition representative. */
323 size_t representative;
324
325 /* The next stack variable in the partition, or EOC. */
326 size_t next;
327
328 /* The numbers of conflicting stack variables. */
329 bitmap conflicts;
330 };
331
332 #define EOC ((size_t)-1)
333
334 /* We have an array of such objects while deciding allocation. */
335 static class stack_var *stack_vars;
336 static size_t stack_vars_alloc;
337 static size_t stack_vars_num;
338 static hash_map<tree, size_t> *decl_to_stack_part;
339
340 /* Conflict bitmaps go on this obstack. This allows us to destroy
341 all of them in one big sweep. */
342 static bitmap_obstack stack_var_bitmap_obstack;
343
344 /* An array of indices such that stack_vars[stack_vars_sorted[i]].size
345 is non-decreasing. */
346 static size_t *stack_vars_sorted;
347
348 /* The phase of the stack frame. This is the known misalignment of
349 virtual_stack_vars_rtx from PREFERRED_STACK_BOUNDARY. That is,
350 (frame_offset+frame_phase) % PREFERRED_STACK_BOUNDARY == 0. */
351 static int frame_phase;
352
353 /* Used during expand_used_vars to remember if we saw any decls for
354 which we'd like to enable stack smashing protection. */
355 static bool has_protected_decls;
356
357 /* Used during expand_used_vars. Remember if we saw a character buffer
358 smaller than our cutoff threshold. Used for -Wstack-protector. */
359 static bool has_short_buffer;
360
361 /* Compute the byte alignment to use for DECL. Ignore alignment
362 requests we cannot honor given the expected alignment of the stack boundary. */
363
364 static unsigned int
365 align_local_variable (tree decl, bool really_expand)
366 {
367 unsigned int align;
368
369 if (TREE_CODE (decl) == SSA_NAME)
370 align = TYPE_ALIGN (TREE_TYPE (decl));
371 else
372 {
373 align = LOCAL_DECL_ALIGNMENT (decl);
374 /* Don't change DECL_ALIGN when called from estimated_stack_frame_size.
375 That is done before IPA and could bump alignment based on host
376 backend even for offloaded code which wants different
377 LOCAL_DECL_ALIGNMENT. */
378 if (really_expand)
379 SET_DECL_ALIGN (decl, align);
380 }
381 return align / BITS_PER_UNIT;
382 }
383
384 /* Align given offset BASE with ALIGN. Round up if ALIGN_UP is true,
385 truncate down otherwise. Return the aligned BASE value. */
386
387 static inline unsigned HOST_WIDE_INT
388 align_base (HOST_WIDE_INT base, unsigned HOST_WIDE_INT align, bool align_up)
389 {
390 return align_up ? (base + align - 1) & -align : base & -align;
391 }
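/* Illustrative sketch, not part of cfgexpand.c: the masking above relies on
   ALIGN being a power of two, so -ALIGN is the mask that clears the low
   bits.  demo_align_base and the constants below are made up for
   illustration; a hosted C environment is assumed.  */
#if 0
#include <assert.h>

static unsigned long
demo_align_base (unsigned long base, unsigned long align, int align_up)
{
  /* Same arithmetic as align_base above.  */
  return align_up ? (base + align - 1) & -align : base & -align;
}

int
main (void)
{
  assert (demo_align_base (13, 8, 1) == 16);	/* rounded up to a multiple of 8 */
  assert (demo_align_base (13, 8, 0) == 8);	/* truncated down */
  assert (demo_align_base (16, 8, 1) == 16);	/* already aligned */
  return 0;
}
#endif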
392
393 /* Allocate SIZE bytes at byte alignment ALIGN from the stack frame.
394 Return the frame offset. */
395
396 static poly_int64
397 alloc_stack_frame_space (poly_int64 size, unsigned HOST_WIDE_INT align)
398 {
399 poly_int64 offset, new_frame_offset;
400
401 if (FRAME_GROWS_DOWNWARD)
402 {
403 new_frame_offset
404 = aligned_lower_bound (frame_offset - frame_phase - size,
405 align) + frame_phase;
406 offset = new_frame_offset;
407 }
408 else
409 {
410 new_frame_offset
411 = aligned_upper_bound (frame_offset - frame_phase,
412 align) + frame_phase;
413 offset = new_frame_offset;
414 new_frame_offset += size;
415 }
416 frame_offset = new_frame_offset;
417
418 if (frame_offset_overflow (frame_offset, cfun->decl))
419 frame_offset = offset = 0;
420
421 return offset;
422 }
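/* Worked example (illustrative numbers, frame_phase assumed to be 0): on a
   downward-growing frame with frame_offset == -16, a request for 12 bytes
   at 8-byte alignment computes aligned_lower_bound (-16 - 12, 8) == -32,
   so the variable is placed at offset -32 and frame_offset becomes -32.
   On an upward-growing frame with frame_offset == 20, the same request
   yields offset aligned_upper_bound (20, 8) == 24 and the new
   frame_offset 24 + 12 == 36.  */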
423
424 /* Accumulate DECL into STACK_VARS. */
425
426 static void
427 add_stack_var (tree decl, bool really_expand)
428 {
429 class stack_var *v;
430
431 if (stack_vars_num >= stack_vars_alloc)
432 {
433 if (stack_vars_alloc)
434 stack_vars_alloc = stack_vars_alloc * 3 / 2;
435 else
436 stack_vars_alloc = 32;
437 stack_vars
438 = XRESIZEVEC (class stack_var, stack_vars, stack_vars_alloc);
439 }
440 if (!decl_to_stack_part)
441 decl_to_stack_part = new hash_map<tree, size_t>;
442
443 v = &stack_vars[stack_vars_num];
444 decl_to_stack_part->put (decl, stack_vars_num);
445
446 v->decl = decl;
447 tree size = TREE_CODE (decl) == SSA_NAME
448 ? TYPE_SIZE_UNIT (TREE_TYPE (decl))
449 : DECL_SIZE_UNIT (decl);
450 v->size = tree_to_poly_uint64 (size);
451 /* Ensure that all variables have size, so that &a != &b for any two
452 variables that are simultaneously live. */
453 if (known_eq (v->size, 0U))
454 v->size = 1;
455 v->alignb = align_local_variable (decl, really_expand);
456 /* An alignment of zero can mightily confuse us later. */
457 gcc_assert (v->alignb != 0);
458
459 /* All variables are initially in their own partition. */
460 v->representative = stack_vars_num;
461 v->next = EOC;
462
463 /* All variables initially conflict with no other. */
464 v->conflicts = NULL;
465
466 /* Ensure that this decl doesn't get put onto the list twice. */
467 set_rtl (decl, pc_rtx);
468
469 stack_vars_num++;
470 }
471
472 /* Make the decls associated with luids X and Y conflict. */
473
474 static void
475 add_stack_var_conflict (size_t x, size_t y)
476 {
477 class stack_var *a = &stack_vars[x];
478 class stack_var *b = &stack_vars[y];
479 if (x == y)
480 return;
481 if (!a->conflicts)
482 a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
483 if (!b->conflicts)
484 b->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
485 bitmap_set_bit (a->conflicts, y);
486 bitmap_set_bit (b->conflicts, x);
487 }
488
489 /* Check whether the decls associated with luids X and Y conflict. */
490
491 static bool
492 stack_var_conflict_p (size_t x, size_t y)
493 {
494 class stack_var *a = &stack_vars[x];
495 class stack_var *b = &stack_vars[y];
496 if (x == y)
497 return false;
498 /* Partitions containing an SSA name result from gimple registers
499 with things like unsupported modes. They are top-level and
500 hence conflict with everything else. */
501 if (TREE_CODE (a->decl) == SSA_NAME || TREE_CODE (b->decl) == SSA_NAME)
502 return true;
503
504 if (!a->conflicts || !b->conflicts)
505 return false;
506 return bitmap_bit_p (a->conflicts, y);
507 }
508
509 /* Callback for walk_stmt_ops. If OP is a decl touched by add_stack_var
510 enter its partition number into bitmap DATA. */
511
512 static bool
513 visit_op (gimple *, tree op, tree, void *data)
514 {
515 bitmap active = (bitmap)data;
516 op = get_base_address (op);
517 if (op
518 && DECL_P (op)
519 && DECL_RTL_IF_SET (op) == pc_rtx)
520 {
521 size_t *v = decl_to_stack_part->get (op);
522 if (v)
523 bitmap_set_bit (active, *v);
524 }
525 return false;
526 }
527
528 /* Callback for walk_stmt_ops. If OP is a decl touched by add_stack_var
529 record conflicts between it and all currently active other partitions
530 from bitmap DATA. */
531
532 static bool
533 visit_conflict (gimple *, tree op, tree, void *data)
534 {
535 bitmap active = (bitmap)data;
536 op = get_base_address (op);
537 if (op
538 && DECL_P (op)
539 && DECL_RTL_IF_SET (op) == pc_rtx)
540 {
541 size_t *v = decl_to_stack_part->get (op);
542 if (v && bitmap_set_bit (active, *v))
543 {
544 size_t num = *v;
545 bitmap_iterator bi;
546 unsigned i;
547 gcc_assert (num < stack_vars_num);
548 EXECUTE_IF_SET_IN_BITMAP (active, 0, i, bi)
549 add_stack_var_conflict (num, i);
550 }
551 }
552 return false;
553 }
554
555 /* Helper routine for add_scope_conflicts, calculating the active partitions
556 at the end of BB, leaving the result in WORK. We're called to generate
557 conflicts when FOR_CONFLICT is true, otherwise we're just tracking
558 liveness. */
559
560 static void
561 add_scope_conflicts_1 (basic_block bb, bitmap work, bool for_conflict)
562 {
563 edge e;
564 edge_iterator ei;
565 gimple_stmt_iterator gsi;
566 walk_stmt_load_store_addr_fn visit;
567
568 bitmap_clear (work);
569 FOR_EACH_EDGE (e, ei, bb->preds)
570 bitmap_ior_into (work, (bitmap)e->src->aux);
571
572 visit = visit_op;
573
574 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
575 {
576 gimple *stmt = gsi_stmt (gsi);
577 walk_stmt_load_store_addr_ops (stmt, work, NULL, NULL, visit);
578 }
579 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
580 {
581 gimple *stmt = gsi_stmt (gsi);
582
583 if (gimple_clobber_p (stmt))
584 {
585 tree lhs = gimple_assign_lhs (stmt);
586 size_t *v;
587 /* Nested function lowering might introduce LHSs
588 that are COMPONENT_REFs. */
589 if (!VAR_P (lhs))
590 continue;
591 if (DECL_RTL_IF_SET (lhs) == pc_rtx
592 && (v = decl_to_stack_part->get (lhs)))
593 bitmap_clear_bit (work, *v);
594 }
595 else if (!is_gimple_debug (stmt))
596 {
597 if (for_conflict
598 && visit == visit_op)
599 {
600 /* If this is the first real instruction in this BB we need
601 to add conflicts for everything live at this point now.
602 Unlike classical liveness for named objects we can't
603 rely on seeing a def/use of the names we're interested in.
604 There might merely be indirect loads/stores. We'd not add any
605 conflicts for such partitions. */
606 bitmap_iterator bi;
607 unsigned i;
608 EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi)
609 {
610 class stack_var *a = &stack_vars[i];
611 if (!a->conflicts)
612 a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
613 bitmap_ior_into (a->conflicts, work);
614 }
615 visit = visit_conflict;
616 }
617 walk_stmt_load_store_addr_ops (stmt, work, visit, visit, visit);
618 }
619 }
620 }
621
622 /* Generate stack partition conflicts between all partitions that are
623 simultaneously live. */
624
625 static void
626 add_scope_conflicts (void)
627 {
628 basic_block bb;
629 bool changed;
630 bitmap work = BITMAP_ALLOC (NULL);
631 int *rpo;
632 int n_bbs;
633
634 /* We approximate the live range of a stack variable by taking the first
635 mention of its name as starting point(s), and by the end-of-scope
636 death clobber added by gimplify as ending point(s) of the range.
637 This overapproximates in the case where we, for instance, moved an
638 address-taken operation upward without also moving a dereference to it
639 upwards. But it's conservatively correct, as a variable can never hold
640 values before its name is mentioned at least once.
641
642 We then do a mostly classical bitmap liveness algorithm. */
643
644 FOR_ALL_BB_FN (bb, cfun)
645 bb->aux = BITMAP_ALLOC (&stack_var_bitmap_obstack);
646
647 rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
648 n_bbs = pre_and_rev_post_order_compute (NULL, rpo, false);
649
650 changed = true;
651 while (changed)
652 {
653 int i;
654 changed = false;
655 for (i = 0; i < n_bbs; i++)
656 {
657 bitmap active;
658 bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
659 active = (bitmap)bb->aux;
660 add_scope_conflicts_1 (bb, work, false);
661 if (bitmap_ior_into (active, work))
662 changed = true;
663 }
664 }
665
666 FOR_EACH_BB_FN (bb, cfun)
667 add_scope_conflicts_1 (bb, work, true);
668
669 free (rpo);
670 BITMAP_FREE (work);
671 FOR_ALL_BB_FN (bb, cfun)
672 BITMAP_FREE (bb->aux);
673 }
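/* Illustrative example (not from the sources) of the approximation above:

     {
       char a[64];
       use (&a);
     }    <- gimplify emits "a = {CLOBBER};" at the close of this scope
     {
       char b[64];
       use (&b);
     }    <- and "b = {CLOBBER};" here

   Each clobber ends the approximated live range of its variable, so A
   and B are never simultaneously live, no conflict is recorded between
   them, and partition_stack_vars may later let them share a single
   stack slot.  */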
674
675 /* A subroutine of partition_stack_vars. A comparison function for qsort,
676 sorting an array of indices by the properties of the object. */
677
678 static int
679 stack_var_cmp (const void *a, const void *b)
680 {
681 size_t ia = *(const size_t *)a;
682 size_t ib = *(const size_t *)b;
683 unsigned int aligna = stack_vars[ia].alignb;
684 unsigned int alignb = stack_vars[ib].alignb;
685 poly_int64 sizea = stack_vars[ia].size;
686 poly_int64 sizeb = stack_vars[ib].size;
687 tree decla = stack_vars[ia].decl;
688 tree declb = stack_vars[ib].decl;
689 bool largea, largeb;
690 unsigned int uida, uidb;
691
692 /* Primary compare on "large" alignment. Large comes first. */
693 largea = (aligna * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT);
694 largeb = (alignb * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT);
695 if (largea != largeb)
696 return (int)largeb - (int)largea;
697
698 /* Secondary compare on size, decreasing. */
699 int diff = compare_sizes_for_sort (sizeb, sizea);
700 if (diff != 0)
701 return diff;
702
703 /* Tertiary compare on true alignment, decreasing. */
704 if (aligna < alignb)
705 return -1;
706 if (aligna > alignb)
707 return 1;
708
709 /* Final compare on ID for sort stability, increasing.
710 Two SSA names are compared by their version, SSA names come before
711 non-SSA names, and two normal decls are compared by their DECL_UID. */
712 if (TREE_CODE (decla) == SSA_NAME)
713 {
714 if (TREE_CODE (declb) == SSA_NAME)
715 uida = SSA_NAME_VERSION (decla), uidb = SSA_NAME_VERSION (declb);
716 else
717 return -1;
718 }
719 else if (TREE_CODE (declb) == SSA_NAME)
720 return 1;
721 else
722 uida = DECL_UID (decla), uidb = DECL_UID (declb);
723 if (uida < uidb)
724 return 1;
725 if (uida > uidb)
726 return -1;
727 return 0;
728 }
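/* For example (illustrative, assuming MAX_SUPPORTED_STACK_ALIGNMENT is
   128 bits): three decls with (size, byte alignment) of (64, 32),
   (64, 8) and (16, 16) sort with 64/32 first, because its 256-bit
   alignment counts as "large", followed by 64/8 and then 16/16; the
   large-aligned objects lead and the rest follow in order of
   decreasing size.  */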
729
730 struct part_traits : unbounded_int_hashmap_traits <size_t, bitmap> {};
731 typedef hash_map<size_t, bitmap, part_traits> part_hashmap;
732
733 /* If the points-to solution *PT points to variables that are in a partition
734 together with other variables, add all partition members to the pointed-to
735 variables bitmap. */
736
737 static void
738 add_partitioned_vars_to_ptset (struct pt_solution *pt,
739 part_hashmap *decls_to_partitions,
740 hash_set<bitmap> *visited, bitmap temp)
741 {
742 bitmap_iterator bi;
743 unsigned i;
744 bitmap *part;
745
746 if (pt->anything
747 || pt->vars == NULL
748 /* The pointed-to vars bitmap is shared, it is enough to
749 visit it once. */
750 || visited->add (pt->vars))
751 return;
752
753 bitmap_clear (temp);
754
755 /* By using a temporary bitmap to store all members of the partitions
756 we have to add, we make sure to visit each of the partitions only
757 once. */
758 EXECUTE_IF_SET_IN_BITMAP (pt->vars, 0, i, bi)
759 if ((!temp
760 || !bitmap_bit_p (temp, i))
761 && (part = decls_to_partitions->get (i)))
762 bitmap_ior_into (temp, *part);
763 if (!bitmap_empty_p (temp))
764 bitmap_ior_into (pt->vars, temp);
765 }
766
767 /* Update points-to sets based on partition info, so we can use them on RTL.
768 The bitmaps representing stack partitions will be saved until expand,
769 where partitioned decls used as bases in memory expressions will be
770 rewritten. */
771
772 static void
773 update_alias_info_with_stack_vars (void)
774 {
775 part_hashmap *decls_to_partitions = NULL;
776 size_t i, j;
777 tree var = NULL_TREE;
778
779 for (i = 0; i < stack_vars_num; i++)
780 {
781 bitmap part = NULL;
782 tree name;
783 struct ptr_info_def *pi;
784
785 /* Not interested in partitions with a single variable. */
786 if (stack_vars[i].representative != i
787 || stack_vars[i].next == EOC)
788 continue;
789
790 if (!decls_to_partitions)
791 {
792 decls_to_partitions = new part_hashmap;
793 cfun->gimple_df->decls_to_pointers = new hash_map<tree, tree>;
794 }
795
796 /* Create an SSA_NAME that points to the partition for use
797 as base during alias-oracle queries on RTL for bases that
798 have been partitioned. */
799 if (var == NULL_TREE)
800 var = create_tmp_var (ptr_type_node);
801 name = make_ssa_name (var);
802
803 /* Create bitmaps representing partitions. They will be used for
804 points-to sets later, so use GGC alloc. */
805 part = BITMAP_GGC_ALLOC ();
806 for (j = i; j != EOC; j = stack_vars[j].next)
807 {
808 tree decl = stack_vars[j].decl;
809 unsigned int uid = DECL_PT_UID (decl);
810 bitmap_set_bit (part, uid);
811 decls_to_partitions->put (uid, part);
812 cfun->gimple_df->decls_to_pointers->put (decl, name);
813 if (TREE_ADDRESSABLE (decl))
814 TREE_ADDRESSABLE (name) = 1;
815 }
816
817 /* Make the SSA name point to all partition members. */
818 pi = get_ptr_info (name);
819 pt_solution_set (&pi->pt, part, false);
820 }
821
822 /* Make all points-to sets that contain one member of a partition
823 contain all members of the partition. */
824 if (decls_to_partitions)
825 {
826 unsigned i;
827 tree name;
828 hash_set<bitmap> visited;
829 bitmap temp = BITMAP_ALLOC (&stack_var_bitmap_obstack);
830
831 FOR_EACH_SSA_NAME (i, name, cfun)
832 {
833 struct ptr_info_def *pi;
834
835 if (POINTER_TYPE_P (TREE_TYPE (name))
836 && ((pi = SSA_NAME_PTR_INFO (name)) != NULL))
837 add_partitioned_vars_to_ptset (&pi->pt, decls_to_partitions,
838 &visited, temp);
839 }
840
841 add_partitioned_vars_to_ptset (&cfun->gimple_df->escaped,
842 decls_to_partitions, &visited, temp);
843
844 delete decls_to_partitions;
845 BITMAP_FREE (temp);
846 }
847 }
848
849 /* A subroutine of partition_stack_vars. The UNION portion of a UNION/FIND
850 partitioning algorithm. Partitions A and B are known to be non-conflicting.
851 Merge them into a single partition A. */
852
853 static void
854 union_stack_vars (size_t a, size_t b)
855 {
856 class stack_var *vb = &stack_vars[b];
857 bitmap_iterator bi;
858 unsigned u;
859
860 gcc_assert (stack_vars[b].next == EOC);
861 /* Add B to A's partition. */
862 stack_vars[b].next = stack_vars[a].next;
863 stack_vars[b].representative = a;
864 stack_vars[a].next = b;
865
866 /* Update the required alignment of partition A to account for B. */
867 if (stack_vars[a].alignb < stack_vars[b].alignb)
868 stack_vars[a].alignb = stack_vars[b].alignb;
869
870 /* Update the interference graph and merge the conflicts. */
871 if (vb->conflicts)
872 {
873 EXECUTE_IF_SET_IN_BITMAP (vb->conflicts, 0, u, bi)
874 add_stack_var_conflict (a, stack_vars[u].representative);
875 BITMAP_FREE (vb->conflicts);
876 }
877 }
878
879 /* A subroutine of expand_used_vars. Binpack the variables into
880 partitions constrained by the interference graph. The overall
881 algorithm used is as follows:
882
883 Sort the objects by size in descending order.
884 For each object A {
885 S = size(A)
886 O = 0
887 loop {
888 Look for the largest non-conflicting object B with size <= S.
889 UNION (A, B)
890 }
891 }
892 */
893
894 static void
895 partition_stack_vars (void)
896 {
897 size_t si, sj, n = stack_vars_num;
898
899 stack_vars_sorted = XNEWVEC (size_t, stack_vars_num);
900 for (si = 0; si < n; ++si)
901 stack_vars_sorted[si] = si;
902
903 if (n == 1)
904 return;
905
906 qsort (stack_vars_sorted, n, sizeof (size_t), stack_var_cmp);
907
908 for (si = 0; si < n; ++si)
909 {
910 size_t i = stack_vars_sorted[si];
911 unsigned int ialign = stack_vars[i].alignb;
912 poly_int64 isize = stack_vars[i].size;
913
914 /* Ignore objects that aren't partition representatives. If we
915 see a var that is not a partition representative, it must
916 have been merged earlier. */
917 if (stack_vars[i].representative != i)
918 continue;
919
920 for (sj = si + 1; sj < n; ++sj)
921 {
922 size_t j = stack_vars_sorted[sj];
923 unsigned int jalign = stack_vars[j].alignb;
924 poly_int64 jsize = stack_vars[j].size;
925
926 /* Ignore objects that aren't partition representatives. */
927 if (stack_vars[j].representative != j)
928 continue;
929
930 /* Do not mix objects of "small" (supported) alignment
931 and "large" (unsupported) alignment. */
932 if ((ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
933 != (jalign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT))
934 break;
935
936 /* For Address Sanitizer do not mix objects with different
937 sizes, as the shorter vars wouldn't be adequately protected.
938 Don't do that for "large" (unsupported) alignment objects,
939 those aren't protected anyway. */
940 if (asan_sanitize_stack_p ()
941 && maybe_ne (isize, jsize)
942 && ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
943 break;
944
945 /* Ignore conflicting objects. */
946 if (stack_var_conflict_p (i, j))
947 continue;
948
949 /* UNION the objects; J joins I's partition. */
950 union_stack_vars (i, j);
951 }
952 }
953
954 update_alias_info_with_stack_vars ();
955 }
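/* Worked example (illustrative): suppose the sorted order is char a[64],
   char b[32], char c[32], where A conflicts with B but not with C, and B
   conflicts with C.  The outer loop visits A first and unions C into A's
   partition (B is skipped as conflicting); the result is the partitions
   {A, C} and {B}, which need roughly 64 + 32 = 96 bytes of frame space
   instead of 128, ignoring alignment padding.  */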
956
957 /* A debugging aid for expand_used_vars. Dump the generated partitions. */
958
959 static void
960 dump_stack_var_partition (void)
961 {
962 size_t si, i, j, n = stack_vars_num;
963
964 for (si = 0; si < n; ++si)
965 {
966 i = stack_vars_sorted[si];
967
968 /* Skip variables that aren't partition representatives, for now. */
969 if (stack_vars[i].representative != i)
970 continue;
971
972 fprintf (dump_file, "Partition %lu: size ", (unsigned long) i);
973 print_dec (stack_vars[i].size, dump_file);
974 fprintf (dump_file, " align %u\n", stack_vars[i].alignb);
975
976 for (j = i; j != EOC; j = stack_vars[j].next)
977 {
978 fputc ('\t', dump_file);
979 print_generic_expr (dump_file, stack_vars[j].decl, dump_flags);
980 }
981 fputc ('\n', dump_file);
982 }
983 }
984
985 /* Assign rtl to DECL at BASE + OFFSET. */
986
987 static void
988 expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
989 poly_int64 offset)
990 {
991 unsigned align;
992 rtx x;
993
994 /* If this fails, we've overflowed the stack frame. Error nicely? */
995 gcc_assert (known_eq (offset, trunc_int_for_mode (offset, Pmode)));
996
997 x = plus_constant (Pmode, base, offset);
998 x = gen_rtx_MEM (TREE_CODE (decl) == SSA_NAME
999 ? TYPE_MODE (TREE_TYPE (decl))
1000 : DECL_MODE (SSAVAR (decl)), x);
1001
1002 if (TREE_CODE (decl) != SSA_NAME)
1003 {
1004 /* Set the alignment we actually gave this decl if it isn't an SSA name.
1005 If it is, we generate stack slots only accidentally, so it isn't as
1006 important; we'll simply use the alignment that is already set. */
1007 if (base == virtual_stack_vars_rtx)
1008 offset -= frame_phase;
1009 align = known_alignment (offset);
1010 align *= BITS_PER_UNIT;
1011 if (align == 0 || align > base_align)
1012 align = base_align;
1013
1014 /* One would think that we could assert that we're not decreasing
1015 alignment here, but (at least) the i386 port does exactly this
1016 via the MINIMUM_ALIGNMENT hook. */
1017
1018 SET_DECL_ALIGN (decl, align);
1019 DECL_USER_ALIGN (decl) = 0;
1020 }
1021
1022 set_rtl (decl, x);
1023 }
1024
1025 class stack_vars_data
1026 {
1027 public:
1028 /* Vector of offset pairs, always the end of some padding followed
1029 by the start of the padding that needs Address Sanitizer protection.
1030 The vector is in reversed order; highest-offset pairs come first. */
1031 auto_vec<HOST_WIDE_INT> asan_vec;
1032
1033 /* Vector of partition representative decls in between the paddings. */
1034 auto_vec<tree> asan_decl_vec;
1035
1036 /* Base pseudo register for Address Sanitizer protected automatic vars. */
1037 rtx asan_base;
1038
1039 /* Alignment needed for the Address Sanitizer protected automatic vars. */
1040 unsigned int asan_alignb;
1041 };
1042
1043 /* A subroutine of expand_used_vars. Give each partition representative
1044 a unique location within the stack frame. Update each partition member
1045 with that location. */
1046
1047 static void
1048 expand_stack_vars (bool (*pred) (size_t), class stack_vars_data *data)
1049 {
1050 size_t si, i, j, n = stack_vars_num;
1051 poly_uint64 large_size = 0, large_alloc = 0;
1052 rtx large_base = NULL;
1053 unsigned large_align = 0;
1054 bool large_allocation_done = false;
1055 tree decl;
1056
1057 /* Determine if there are any variables requiring "large" alignment.
1058 Since these are dynamically allocated, we only process these if
1059 no predicate is involved. */
1060 large_align = stack_vars[stack_vars_sorted[0]].alignb * BITS_PER_UNIT;
1061 if (pred == NULL && large_align > MAX_SUPPORTED_STACK_ALIGNMENT)
1062 {
1063 /* Find the total size of these variables. */
1064 for (si = 0; si < n; ++si)
1065 {
1066 unsigned alignb;
1067
1068 i = stack_vars_sorted[si];
1069 alignb = stack_vars[i].alignb;
1070
1071 /* All "large" alignment decls come before all "small" alignment
1072 decls, but "large" alignment decls are not sorted based on
1073 their alignment. Increase large_align to track the largest
1074 required alignment. */
1075 if ((alignb * BITS_PER_UNIT) > large_align)
1076 large_align = alignb * BITS_PER_UNIT;
1077
1078 /* Stop when we get to the first decl with "small" alignment. */
1079 if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
1080 break;
1081
1082 /* Skip variables that aren't partition representatives. */
1083 if (stack_vars[i].representative != i)
1084 continue;
1085
1086 /* Skip variables that have already had rtl assigned. See also
1087 add_stack_var where we perpetrate this pc_rtx hack. */
1088 decl = stack_vars[i].decl;
1089 if (TREE_CODE (decl) == SSA_NAME
1090 ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)] != NULL_RTX
1091 : DECL_RTL (decl) != pc_rtx)
1092 continue;
1093
1094 large_size = aligned_upper_bound (large_size, alignb);
1095 large_size += stack_vars[i].size;
1096 }
1097 }
1098
1099 for (si = 0; si < n; ++si)
1100 {
1101 rtx base;
1102 unsigned base_align, alignb;
1103 poly_int64 offset;
1104
1105 i = stack_vars_sorted[si];
1106
1107 /* Skip variables that aren't partition representatives, for now. */
1108 if (stack_vars[i].representative != i)
1109 continue;
1110
1111 /* Skip variables that have already had rtl assigned. See also
1112 add_stack_var where we perpetrate this pc_rtx hack. */
1113 decl = stack_vars[i].decl;
1114 if (TREE_CODE (decl) == SSA_NAME
1115 ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)] != NULL_RTX
1116 : DECL_RTL (decl) != pc_rtx)
1117 continue;
1118
1119 /* Check the predicate to see whether this variable should be
1120 allocated in this pass. */
1121 if (pred && !pred (i))
1122 continue;
1123
1124 alignb = stack_vars[i].alignb;
1125 if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
1126 {
1127 base = virtual_stack_vars_rtx;
1128 /* ASAN description strings don't yet have a syntax for expressing
1129 polynomial offsets. */
1130 HOST_WIDE_INT prev_offset;
1131 if (asan_sanitize_stack_p ()
1132 && pred
1133 && frame_offset.is_constant (&prev_offset)
1134 && stack_vars[i].size.is_constant ())
1135 {
1136 if (data->asan_vec.is_empty ())
1137 {
1138 alloc_stack_frame_space (0, ASAN_RED_ZONE_SIZE);
1139 prev_offset = frame_offset.to_constant ();
1140 }
1141 prev_offset = align_base (prev_offset,
1142 ASAN_MIN_RED_ZONE_SIZE,
1143 !FRAME_GROWS_DOWNWARD);
1144 tree repr_decl = NULL_TREE;
1145 unsigned HOST_WIDE_INT size
1146 = asan_var_and_redzone_size (stack_vars[i].size.to_constant ());
1147 if (data->asan_vec.is_empty ())
1148 size = MAX (size, ASAN_RED_ZONE_SIZE);
1149
1150 unsigned HOST_WIDE_INT alignment = MAX (alignb,
1151 ASAN_MIN_RED_ZONE_SIZE);
1152 offset = alloc_stack_frame_space (size, alignment);
1153
1154 data->asan_vec.safe_push (prev_offset);
1155 /* Allocating a constant amount of space from a constant
1156 starting offset must give a constant result. */
1157 data->asan_vec.safe_push ((offset + stack_vars[i].size)
1158 .to_constant ());
1159 /* Find the best representative of the partition.
1160 Prefer those with DECL_NAME, and better still those that
1161 also satisfy the asan_protect_stack_decl predicate. */
1162 for (j = i; j != EOC; j = stack_vars[j].next)
1163 if (asan_protect_stack_decl (stack_vars[j].decl)
1164 && DECL_NAME (stack_vars[j].decl))
1165 {
1166 repr_decl = stack_vars[j].decl;
1167 break;
1168 }
1169 else if (repr_decl == NULL_TREE
1170 && DECL_P (stack_vars[j].decl)
1171 && DECL_NAME (stack_vars[j].decl))
1172 repr_decl = stack_vars[j].decl;
1173 if (repr_decl == NULL_TREE)
1174 repr_decl = stack_vars[i].decl;
1175 data->asan_decl_vec.safe_push (repr_decl);
1176
1177 /* Make sure a representative is unpoisoned if another
1178 variable in the partition is handled by
1179 use-after-scope sanitization. */
1180 if (asan_handled_variables != NULL
1181 && !asan_handled_variables->contains (repr_decl))
1182 {
1183 for (j = i; j != EOC; j = stack_vars[j].next)
1184 if (asan_handled_variables->contains (stack_vars[j].decl))
1185 break;
1186 if (j != EOC)
1187 asan_handled_variables->add (repr_decl);
1188 }
1189
1190 data->asan_alignb = MAX (data->asan_alignb, alignb);
1191 if (data->asan_base == NULL)
1192 data->asan_base = gen_reg_rtx (Pmode);
1193 base = data->asan_base;
1194
1195 if (!STRICT_ALIGNMENT)
1196 base_align = crtl->max_used_stack_slot_alignment;
1197 else
1198 base_align = MAX (crtl->max_used_stack_slot_alignment,
1199 GET_MODE_ALIGNMENT (SImode)
1200 << ASAN_SHADOW_SHIFT);
1201 }
1202 else
1203 {
1204 offset = alloc_stack_frame_space (stack_vars[i].size, alignb);
1205 base_align = crtl->max_used_stack_slot_alignment;
1206 }
1207 }
1208 else
1209 {
1210 /* Large alignment is only processed in the last pass. */
1211 if (pred)
1212 continue;
1213
1214 /* If there were any variables requiring "large" alignment, allocate
1215 space. */
1216 if (maybe_ne (large_size, 0U) && ! large_allocation_done)
1217 {
1218 poly_int64 loffset;
1219 rtx large_allocsize;
1220
1221 large_allocsize = gen_int_mode (large_size, Pmode);
1222 get_dynamic_stack_size (&large_allocsize, 0, large_align, NULL);
1223 loffset = alloc_stack_frame_space
1224 (rtx_to_poly_int64 (large_allocsize),
1225 PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
1226 large_base = get_dynamic_stack_base (loffset, large_align);
1227 large_allocation_done = true;
1228 }
1229 gcc_assert (large_base != NULL);
1230
1231 large_alloc = aligned_upper_bound (large_alloc, alignb);
1232 offset = large_alloc;
1233 large_alloc += stack_vars[i].size;
1234
1235 base = large_base;
1236 base_align = large_align;
1237 }
1238
1239 /* Create rtl for each variable based on their location within the
1240 partition. */
1241 for (j = i; j != EOC; j = stack_vars[j].next)
1242 {
1243 expand_one_stack_var_at (stack_vars[j].decl,
1244 base, base_align,
1245 offset);
1246 }
1247 }
1248
1249 gcc_assert (known_eq (large_alloc, large_size));
1250 }
1251
1252 /* Take into account all sizes of partitions and reset DECL_RTLs. */
1253 static poly_uint64
1254 account_stack_vars (void)
1255 {
1256 size_t si, j, i, n = stack_vars_num;
1257 poly_uint64 size = 0;
1258
1259 for (si = 0; si < n; ++si)
1260 {
1261 i = stack_vars_sorted[si];
1262
1263 /* Skip variables that aren't partition representatives, for now. */
1264 if (stack_vars[i].representative != i)
1265 continue;
1266
1267 size += stack_vars[i].size;
1268 for (j = i; j != EOC; j = stack_vars[j].next)
1269 set_rtl (stack_vars[j].decl, NULL);
1270 }
1271 return size;
1272 }
1273
1274 /* Record the RTL assignment X for the default def of PARM. */
1275
1276 extern void
1277 set_parm_rtl (tree parm, rtx x)
1278 {
1279 gcc_assert (TREE_CODE (parm) == PARM_DECL
1280 || TREE_CODE (parm) == RESULT_DECL);
1281
1282 if (x && !MEM_P (x))
1283 {
1284 unsigned int align = MINIMUM_ALIGNMENT (TREE_TYPE (parm),
1285 TYPE_MODE (TREE_TYPE (parm)),
1286 TYPE_ALIGN (TREE_TYPE (parm)));
1287
1288 /* If the variable alignment is very large we'll dynamically
1289 allocate it, which means that in-frame portion is just a
1290 pointer. ??? We've got a pseudo for sure here, do we
1291 actually dynamically allocate its spilling area if needed?
1292 ??? Isn't it a problem when Pmode alignment also exceeds
1293 MAX_SUPPORTED_STACK_ALIGNMENT, as can happen on cris and lm32? */
1294 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
1295 align = GET_MODE_ALIGNMENT (Pmode);
1296
1297 record_alignment_for_reg_var (align);
1298 }
1299
1300 tree ssa = ssa_default_def (cfun, parm);
1301 if (!ssa)
1302 return set_rtl (parm, x);
1303
1304 int part = var_to_partition (SA.map, ssa);
1305 gcc_assert (part != NO_PARTITION);
1306
1307 bool changed = bitmap_bit_p (SA.partitions_for_parm_default_defs, part);
1308 gcc_assert (changed);
1309
1310 set_rtl (ssa, x);
1311 gcc_assert (DECL_RTL (parm) == x);
1312 }
1313
1314 /* A subroutine of expand_one_var. Called to immediately assign rtl
1315 to a variable to be allocated in the stack frame. */
1316
1317 static void
1318 expand_one_stack_var_1 (tree var)
1319 {
1320 poly_uint64 size;
1321 poly_int64 offset;
1322 unsigned byte_align;
1323
1324 if (TREE_CODE (var) == SSA_NAME)
1325 {
1326 tree type = TREE_TYPE (var);
1327 size = tree_to_poly_uint64 (TYPE_SIZE_UNIT (type));
1328 byte_align = TYPE_ALIGN_UNIT (type);
1329 }
1330 else
1331 {
1332 size = tree_to_poly_uint64 (DECL_SIZE_UNIT (var));
1333 byte_align = align_local_variable (var, true);
1334 }
1335
1336 /* We handle highly aligned variables in expand_stack_vars. */
1337 gcc_assert (byte_align * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT);
1338
1339 offset = alloc_stack_frame_space (size, byte_align);
1340
1341 expand_one_stack_var_at (var, virtual_stack_vars_rtx,
1342 crtl->max_used_stack_slot_alignment, offset);
1343 }
1344
1345 /* Wrapper for expand_one_stack_var_1 that checks SSA_NAMEs are
1346 already assigned some MEM. */
1347
1348 static void
1349 expand_one_stack_var (tree var)
1350 {
1351 if (TREE_CODE (var) == SSA_NAME)
1352 {
1353 int part = var_to_partition (SA.map, var);
1354 if (part != NO_PARTITION)
1355 {
1356 rtx x = SA.partition_to_pseudo[part];
1357 gcc_assert (x);
1358 gcc_assert (MEM_P (x));
1359 return;
1360 }
1361 }
1362
1363 return expand_one_stack_var_1 (var);
1364 }
1365
1366 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL
1367 that will reside in a hard register. */
1368
1369 static void
1370 expand_one_hard_reg_var (tree var)
1371 {
1372 rest_of_decl_compilation (var, 0, 0);
1373 }
1374
1375 /* Record the alignment requirements of some variable assigned to a
1376 pseudo. */
1377
1378 static void
1379 record_alignment_for_reg_var (unsigned int align)
1380 {
1381 if (SUPPORTS_STACK_ALIGNMENT
1382 && crtl->stack_alignment_estimated < align)
1383 {
1384 /* stack_alignment_estimated shouldn't change after the stack
1385 realign decision has been made. */
1386 gcc_assert (!crtl->stack_realign_processed);
1387 crtl->stack_alignment_estimated = align;
1388 }
1389
1390 /* stack_alignment_needed > PREFERRED_STACK_BOUNDARY is permitted.
1391 So here we only make sure stack_alignment_needed >= align. */
1392 if (crtl->stack_alignment_needed < align)
1393 crtl->stack_alignment_needed = align;
1394 if (crtl->max_used_stack_slot_alignment < align)
1395 crtl->max_used_stack_slot_alignment = align;
1396 }
1397
1398 /* Create RTL for an SSA partition. */
1399
1400 static void
1401 expand_one_ssa_partition (tree var)
1402 {
1403 int part = var_to_partition (SA.map, var);
1404 gcc_assert (part != NO_PARTITION);
1405
1406 if (SA.partition_to_pseudo[part])
1407 return;
1408
1409 unsigned int align = MINIMUM_ALIGNMENT (TREE_TYPE (var),
1410 TYPE_MODE (TREE_TYPE (var)),
1411 TYPE_ALIGN (TREE_TYPE (var)));
1412
1413 /* If the variable alignment is very large we'll dynamically allocate
1414 it, which means that in-frame portion is just a pointer. */
1415 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
1416 align = GET_MODE_ALIGNMENT (Pmode);
1417
1418 record_alignment_for_reg_var (align);
1419
1420 if (!use_register_for_decl (var))
1421 {
1422 if (defer_stack_allocation (var, true))
1423 add_stack_var (var, true);
1424 else
1425 expand_one_stack_var_1 (var);
1426 return;
1427 }
1428
1429 machine_mode reg_mode = promote_ssa_mode (var, NULL);
1430 rtx x = gen_reg_rtx (reg_mode);
1431
1432 set_rtl (var, x);
1433
1434 /* For a promoted variable, X will not be used directly but wrapped in a
1435 SUBREG with SUBREG_PROMOTED_VAR_P set, which means that the RTL land
1436 will assume that its upper bits can be inferred from its lower bits.
1437 Therefore, if X isn't initialized on every path from the entry, then
1438 we must do it manually in order to fulfill the above assumption. */
1439 if (reg_mode != TYPE_MODE (TREE_TYPE (var))
1440 && bitmap_bit_p (SA.partitions_for_undefined_values, part))
1441 emit_move_insn (x, CONST0_RTX (reg_mode));
1442 }
1443
1444 /* Record the association between the RTL generated for partition PART
1445 and the underlying variable of the SSA_NAME VAR. */
1446
1447 static void
1448 adjust_one_expanded_partition_var (tree var)
1449 {
1450 if (!var)
1451 return;
1452
1453 tree decl = SSA_NAME_VAR (var);
1454
1455 int part = var_to_partition (SA.map, var);
1456 if (part == NO_PARTITION)
1457 return;
1458
1459 rtx x = SA.partition_to_pseudo[part];
1460
1461 gcc_assert (x);
1462
1463 set_rtl (var, x);
1464
1465 if (!REG_P (x))
1466 return;
1467
1468 /* Note if the object is a user variable. */
1469 if (decl && !DECL_ARTIFICIAL (decl))
1470 mark_user_reg (x);
1471
1472 if (POINTER_TYPE_P (decl ? TREE_TYPE (decl) : TREE_TYPE (var)))
1473 mark_reg_pointer (x, get_pointer_alignment (var));
1474 }
1475
1476 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL
1477 that will reside in a pseudo register. */
1478
1479 static void
1480 expand_one_register_var (tree var)
1481 {
1482 if (TREE_CODE (var) == SSA_NAME)
1483 {
1484 int part = var_to_partition (SA.map, var);
1485 if (part != NO_PARTITION)
1486 {
1487 rtx x = SA.partition_to_pseudo[part];
1488 gcc_assert (x);
1489 gcc_assert (REG_P (x));
1490 return;
1491 }
1492 gcc_unreachable ();
1493 }
1494
1495 tree decl = var;
1496 tree type = TREE_TYPE (decl);
1497 machine_mode reg_mode = promote_decl_mode (decl, NULL);
1498 rtx x = gen_reg_rtx (reg_mode);
1499
1500 set_rtl (var, x);
1501
1502 /* Note if the object is a user variable. */
1503 if (!DECL_ARTIFICIAL (decl))
1504 mark_user_reg (x);
1505
1506 if (POINTER_TYPE_P (type))
1507 mark_reg_pointer (x, get_pointer_alignment (var));
1508 }
1509
1510 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL that
1511 has some associated error, e.g. its type is error-mark. We just need
1512 to pick something that won't crash the rest of the compiler. */
1513
1514 static void
1515 expand_one_error_var (tree var)
1516 {
1517 machine_mode mode = DECL_MODE (var);
1518 rtx x;
1519
1520 if (mode == BLKmode)
1521 x = gen_rtx_MEM (BLKmode, const0_rtx);
1522 else if (mode == VOIDmode)
1523 x = const0_rtx;
1524 else
1525 x = gen_reg_rtx (mode);
1526
1527 SET_DECL_RTL (var, x);
1528 }
1529
1530 /* A subroutine of expand_one_var. VAR is a variable that will be
1531 allocated to the local stack frame. Return true if we wish to
1532 add VAR to STACK_VARS so that it will be coalesced with other
1533 variables. Return false to allocate VAR immediately.
1534
1535 This function is used to reduce the number of variables considered
1536 for coalescing, which reduces the size of the quadratic problem. */
1537
1538 static bool
1539 defer_stack_allocation (tree var, bool toplevel)
1540 {
1541 tree size_unit = TREE_CODE (var) == SSA_NAME
1542 ? TYPE_SIZE_UNIT (TREE_TYPE (var))
1543 : DECL_SIZE_UNIT (var);
1544 poly_uint64 size;
1545
1546 /* Whether the variable is small enough for immediate allocation not to be
1547 a problem with regard to the frame size. */
1548 bool smallish
1549 = (poly_int_tree_p (size_unit, &size)
1550 && (estimated_poly_value (size)
1551 < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING)));
1552
1553 /* If stack protection is enabled, *all* stack variables must be deferred,
1554 so that we can re-order the strings to the top of the frame.
1555 Similarly for Address Sanitizer. */
1556 if (flag_stack_protect || asan_sanitize_stack_p ())
1557 return true;
1558
1559 unsigned int align = TREE_CODE (var) == SSA_NAME
1560 ? TYPE_ALIGN (TREE_TYPE (var))
1561 : DECL_ALIGN (var);
1562
1563 /* We handle "large" alignment via dynamic allocation. We want to handle
1564 this extra complication in only one place, so defer them. */
1565 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
1566 return true;
1567
1568 bool ignored = TREE_CODE (var) == SSA_NAME
1569 ? !SSAVAR (var) || DECL_IGNORED_P (SSA_NAME_VAR (var))
1570 : DECL_IGNORED_P (var);
1571
1572 /* When optimization is enabled, DECL_IGNORED_P variables originally scoped
1573 might be detached from their block and appear at toplevel when we reach
1574 here. We want to coalesce them with variables from other blocks when
1575 the immediate contribution to the frame size would be noticeable. */
1576 if (toplevel && optimize > 0 && ignored && !smallish)
1577 return true;
1578
1579 /* Variables declared in the outermost scope automatically conflict
1580 with every other variable. The only reason to want to defer them
1581 at all is that, after sorting, we can more efficiently pack
1582 small variables in the stack frame. Continue to defer at -O2. */
1583 if (toplevel && optimize < 2)
1584 return false;
1585
1586 /* Without optimization, *most* variables are allocated from the
1587 stack, which makes the quadratic problem large exactly when we
1588 want compilation to proceed as quickly as possible. On the
1589 other hand, we don't want the function's stack frame size to
1590 get completely out of hand. So we avoid adding scalars and
1591 "small" aggregates to the list at all. */
1592 if (optimize == 0 && smallish)
1593 return false;
1594
1595 return true;
1596 }
1597
1598 /* A subroutine of expand_used_vars. Expand one variable according to
1599 its flavor. Variables to be placed on the stack are not actually
1600 expanded yet, merely recorded.
1601 When REALLY_EXPAND is false, only add stack values to be allocated.
1602 Return the stack usage this variable is supposed to take.
1603 */
1604
1605 static poly_uint64
1606 expand_one_var (tree var, bool toplevel, bool really_expand)
1607 {
1608 unsigned int align = BITS_PER_UNIT;
1609 tree origvar = var;
1610
1611 var = SSAVAR (var);
1612
1613 if (TREE_TYPE (var) != error_mark_node && VAR_P (var))
1614 {
1615 if (is_global_var (var))
1616 return 0;
1617
1618 /* Because we don't know if VAR will be in register or on stack,
1619 we conservatively assume it will be on stack even if VAR is
1620 eventually put into register after RA pass. For non-automatic
1621 variables, which won't be on stack, we collect alignment of
1622 type and ignore user specified alignment. Similarly for
1623 SSA_NAMEs for which use_register_for_decl returns true. */
1624 if (TREE_STATIC (var)
1625 || DECL_EXTERNAL (var)
1626 || (TREE_CODE (origvar) == SSA_NAME && use_register_for_decl (var)))
1627 align = MINIMUM_ALIGNMENT (TREE_TYPE (var),
1628 TYPE_MODE (TREE_TYPE (var)),
1629 TYPE_ALIGN (TREE_TYPE (var)));
1630 else if (DECL_HAS_VALUE_EXPR_P (var)
1631 || (DECL_RTL_SET_P (var) && MEM_P (DECL_RTL (var))))
1632 /* Don't consider debug only variables with DECL_HAS_VALUE_EXPR_P set
1633 or variables which were assigned a stack slot already by
1634 expand_one_stack_var_at - in the latter case DECL_ALIGN has been
1635 changed from the offset chosen to it. */
1636 align = crtl->stack_alignment_estimated;
1637 else
1638 align = MINIMUM_ALIGNMENT (var, DECL_MODE (var), DECL_ALIGN (var));
1639
1640 /* If the variable alignment is very large we'll dynamically allocate
1641 it, which means that in-frame portion is just a pointer. */
1642 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
1643 align = GET_MODE_ALIGNMENT (Pmode);
1644 }
1645
1646 record_alignment_for_reg_var (align);
1647
1648 poly_uint64 size;
1649 if (TREE_CODE (origvar) == SSA_NAME)
1650 {
1651 gcc_assert (!VAR_P (var)
1652 || (!DECL_EXTERNAL (var)
1653 && !DECL_HAS_VALUE_EXPR_P (var)
1654 && !TREE_STATIC (var)
1655 && TREE_TYPE (var) != error_mark_node
1656 && !DECL_HARD_REGISTER (var)
1657 && really_expand));
1658 }
1659 if (!VAR_P (var) && TREE_CODE (origvar) != SSA_NAME)
1660 ;
1661 else if (DECL_EXTERNAL (var))
1662 ;
1663 else if (DECL_HAS_VALUE_EXPR_P (var))
1664 ;
1665 else if (TREE_STATIC (var))
1666 ;
1667 else if (TREE_CODE (origvar) != SSA_NAME && DECL_RTL_SET_P (var))
1668 ;
1669 else if (TREE_TYPE (var) == error_mark_node)
1670 {
1671 if (really_expand)
1672 expand_one_error_var (var);
1673 }
1674 else if (VAR_P (var) && DECL_HARD_REGISTER (var))
1675 {
1676 if (really_expand)
1677 {
1678 expand_one_hard_reg_var (var);
1679 if (!DECL_HARD_REGISTER (var))
1680 /* Invalid register specification. */
1681 expand_one_error_var (var);
1682 }
1683 }
1684 else if (use_register_for_decl (var))
1685 {
1686 if (really_expand)
1687 expand_one_register_var (origvar);
1688 }
1689 else if (!poly_int_tree_p (DECL_SIZE_UNIT (var), &size)
1690 || !valid_constant_size_p (DECL_SIZE_UNIT (var)))
1691 {
1692 /* Reject variables which cover more than half of the address-space. */
1693 if (really_expand)
1694 {
1695 if (DECL_NONLOCAL_FRAME (var))
1696 error_at (DECL_SOURCE_LOCATION (current_function_decl),
1697 "total size of local objects is too large");
1698 else
1699 error_at (DECL_SOURCE_LOCATION (var),
1700 "size of variable %q+D is too large", var);
1701 expand_one_error_var (var);
1702 }
1703 }
1704 else if (defer_stack_allocation (var, toplevel))
1705 add_stack_var (origvar, really_expand);
1706 else
1707 {
1708 if (really_expand)
1709 {
1710 if (lookup_attribute ("naked",
1711 DECL_ATTRIBUTES (current_function_decl)))
1712 error ("cannot allocate stack for variable %q+D, naked function",
1713 var);
1714
1715 expand_one_stack_var (origvar);
1716 }
1717 return size;
1718 }
1719 return 0;
1720 }
1721
1722 /* A subroutine of expand_used_vars. Walk down through the BLOCK tree
1723 expanding variables. Those variables that can be put into registers
1724 are allocated pseudos; those that can't are put on the stack.
1725
1726 TOPLEVEL is true if this is the outermost BLOCK. */
1727
1728 static void
1729 expand_used_vars_for_block (tree block, bool toplevel)
1730 {
1731 tree t;
1732
1733 /* Expand all variables at this level. */
1734 for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t))
1735 if (TREE_USED (t)
1736 && ((!VAR_P (t) && TREE_CODE (t) != RESULT_DECL)
1737 || !DECL_NONSHAREABLE (t)))
1738 expand_one_var (t, toplevel, true);
1739
1740 /* Expand all variables in nested sub-blocks. */
1741 for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
1742 expand_used_vars_for_block (t, false);
1743 }
1744
1745 /* A subroutine of expand_used_vars. Walk down through the BLOCK tree
1746 and clear TREE_USED on all local variables. */
1747
1748 static void
1749 clear_tree_used (tree block)
1750 {
1751 tree t;
1752
1753 for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t))
1754 /* if (!TREE_STATIC (t) && !DECL_EXTERNAL (t)) */
1755 if ((!VAR_P (t) && TREE_CODE (t) != RESULT_DECL)
1756 || !DECL_NONSHAREABLE (t))
1757 TREE_USED (t) = 0;
1758
1759 for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
1760 clear_tree_used (t);
1761 }
1762
1763 enum {
1764 SPCT_FLAG_DEFAULT = 1,
1765 SPCT_FLAG_ALL = 2,
1766 SPCT_FLAG_STRONG = 3,
1767 SPCT_FLAG_EXPLICIT = 4
1768 };
1769
1770 /* Examine TYPE and determine a bit mask of the following features. */
1771
1772 #define SPCT_HAS_LARGE_CHAR_ARRAY 1
1773 #define SPCT_HAS_SMALL_CHAR_ARRAY 2
1774 #define SPCT_HAS_ARRAY 4
1775 #define SPCT_HAS_AGGREGATE 8
1776
1777 static unsigned int
1778 stack_protect_classify_type (tree type)
1779 {
1780 unsigned int ret = 0;
1781 tree t;
1782
1783 switch (TREE_CODE (type))
1784 {
1785 case ARRAY_TYPE:
1786 t = TYPE_MAIN_VARIANT (TREE_TYPE (type));
1787 if (t == char_type_node
1788 || t == signed_char_type_node
1789 || t == unsigned_char_type_node)
1790 {
1791 unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
1792 unsigned HOST_WIDE_INT len;
1793
1794 if (!TYPE_SIZE_UNIT (type)
1795 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
1796 len = max;
1797 else
1798 len = tree_to_uhwi (TYPE_SIZE_UNIT (type));
1799
1800 if (len < max)
1801 ret = SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY;
1802 else
1803 ret = SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY;
1804 }
1805 else
1806 ret = SPCT_HAS_ARRAY;
1807 break;
1808
1809 case UNION_TYPE:
1810 case QUAL_UNION_TYPE:
1811 case RECORD_TYPE:
1812 ret = SPCT_HAS_AGGREGATE;
1813 for (t = TYPE_FIELDS (type); t ; t = TREE_CHAIN (t))
1814 if (TREE_CODE (t) == FIELD_DECL)
1815 ret |= stack_protect_classify_type (TREE_TYPE (t));
1816 break;
1817
1818 default:
1819 break;
1820 }
1821
1822 return ret;
1823 }
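
/* A few illustrative classifications, assuming the default
   --param ssp-buffer-size=8:

     char small[4];              SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY
     char big[64];               SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY
     int nums[16];               SPCT_HAS_ARRAY
     struct { char b[64]; } s;   SPCT_HAS_AGGREGATE | SPCT_HAS_LARGE_CHAR_ARRAY
                                 | SPCT_HAS_ARRAY  */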
1824
1825 /* Return nonzero if DECL should be segregated into the "vulnerable" upper
1826 part of the local stack frame. Remember if we ever return nonzero for
1827 any variable in this function. The return value is the phase number in
1828 which the variable should be allocated. */
1829
1830 static int
1831 stack_protect_decl_phase (tree decl)
1832 {
1833 unsigned int bits = stack_protect_classify_type (TREE_TYPE (decl));
1834 int ret = 0;
1835
1836 if (bits & SPCT_HAS_SMALL_CHAR_ARRAY)
1837 has_short_buffer = true;
1838
1839 if (flag_stack_protect == SPCT_FLAG_ALL
1840 || flag_stack_protect == SPCT_FLAG_STRONG
1841 || (flag_stack_protect == SPCT_FLAG_EXPLICIT
1842 && lookup_attribute ("stack_protect",
1843 DECL_ATTRIBUTES (current_function_decl))))
1844 {
1845 if ((bits & (SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_LARGE_CHAR_ARRAY))
1846 && !(bits & SPCT_HAS_AGGREGATE))
1847 ret = 1;
1848 else if (bits & SPCT_HAS_ARRAY)
1849 ret = 2;
1850 }
1851 else
1852 ret = (bits & SPCT_HAS_LARGE_CHAR_ARRAY) != 0;
1853
1854 if (ret)
1855 has_protected_decls = true;
1856
1857 return ret;
1858 }
1859
1860 /* Two helper routines that check for phase 1 and phase 2. These are used
1861 as callbacks for expand_stack_vars. */
1862
1863 static bool
1864 stack_protect_decl_phase_1 (size_t i)
1865 {
1866 return stack_protect_decl_phase (stack_vars[i].decl) == 1;
1867 }
1868
1869 static bool
1870 stack_protect_decl_phase_2 (size_t i)
1871 {
1872 return stack_protect_decl_phase (stack_vars[i].decl) == 2;
1873 }
1874
1875 /* A helper function that checks for the asan phase (with stack protector
1876 it is phase 3). This is used as a callback for expand_stack_vars.
1877 Returns true if any of the vars in the partition need to be protected. */
1878
1879 static bool
1880 asan_decl_phase_3 (size_t i)
1881 {
1882 while (i != EOC)
1883 {
1884 if (asan_protect_stack_decl (stack_vars[i].decl))
1885 return true;
1886 i = stack_vars[i].next;
1887 }
1888 return false;
1889 }
1890
1891 /* Ensure that variables in different stack protection phases conflict
1892 so that they are not merged and share the same stack slot. */
1893
1894 static void
1895 add_stack_protection_conflicts (void)
1896 {
1897 size_t i, j, n = stack_vars_num;
1898 unsigned char *phase;
1899
1900 phase = XNEWVEC (unsigned char, n);
1901 for (i = 0; i < n; ++i)
1902 phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
1903
1904 for (i = 0; i < n; ++i)
1905 {
1906 unsigned char ph_i = phase[i];
1907 for (j = i + 1; j < n; ++j)
1908 if (ph_i != phase[j])
1909 add_stack_var_conflict (i, j);
1910 }
1911
1912 XDELETEVEC (phase);
1913 }
1914
1915 /* Create a decl for the guard at the top of the stack frame. */
1916
1917 static void
1918 create_stack_guard (void)
1919 {
1920 tree guard = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
1921 VAR_DECL, NULL, ptr_type_node);
1922 TREE_THIS_VOLATILE (guard) = 1;
1923 TREE_USED (guard) = 1;
1924 expand_one_stack_var (guard);
1925 crtl->stack_protect_guard = guard;
1926 }
1927
1928 /* Prepare for expanding variables. */
1929 static void
1930 init_vars_expansion (void)
1931 {
1932 /* Conflict bitmaps, and a few related temporary bitmaps, go here. */
1933 bitmap_obstack_initialize (&stack_var_bitmap_obstack);
1934
1935 /* A map from decl to stack partition. */
1936 decl_to_stack_part = new hash_map<tree, size_t>;
1937
1938 /* Initialize local stack smashing state. */
1939 has_protected_decls = false;
1940 has_short_buffer = false;
1941 }
1942
1943 /* Free up stack variable graph data. */
1944 static void
1945 fini_vars_expansion (void)
1946 {
1947 bitmap_obstack_release (&stack_var_bitmap_obstack);
1948 if (stack_vars)
1949 XDELETEVEC (stack_vars);
1950 if (stack_vars_sorted)
1951 XDELETEVEC (stack_vars_sorted);
1952 stack_vars = NULL;
1953 stack_vars_sorted = NULL;
1954 stack_vars_alloc = stack_vars_num = 0;
1955 delete decl_to_stack_part;
1956 decl_to_stack_part = NULL;
1957 }
1958
1959 /* Make a fair guess for the size of the stack frame of the function
1960 in NODE. This doesn't have to be exact; the result is only used in
1961 the inline heuristics. So we don't want to run the full stack var
1962 packing algorithm (which is quadratic in the number of stack vars).
1963 Instead, we calculate the total size of all stack vars. This turns
1964 out to be a pretty fair estimate -- packing of stack vars doesn't
1965 happen very often. */
1966
1967 HOST_WIDE_INT
1968 estimated_stack_frame_size (struct cgraph_node *node)
1969 {
1970 poly_int64 size = 0;
1971 size_t i;
1972 tree var;
1973 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
1974
1975 push_cfun (fn);
1976
1977 init_vars_expansion ();
1978
1979 FOR_EACH_LOCAL_DECL (fn, i, var)
1980 if (auto_var_in_fn_p (var, fn->decl))
1981 size += expand_one_var (var, true, false);
1982
1983 if (stack_vars_num > 0)
1984 {
1985 /* Fake sorting the stack vars for account_stack_vars (). */
1986 stack_vars_sorted = XNEWVEC (size_t, stack_vars_num);
1987 for (i = 0; i < stack_vars_num; ++i)
1988 stack_vars_sorted[i] = i;
1989 size += account_stack_vars ();
1990 }
1991
1992 fini_vars_expansion ();
1993 pop_cfun ();
1994 return estimated_poly_value (size);
1995 }
1996
1997 /* Helper routine to check if a record or union contains an array field. */
1998
1999 static int
2000 record_or_union_type_has_array_p (const_tree tree_type)
2001 {
2002 tree fields = TYPE_FIELDS (tree_type);
2003 tree f;
2004
2005 for (f = fields; f; f = DECL_CHAIN (f))
2006 if (TREE_CODE (f) == FIELD_DECL)
2007 {
2008 tree field_type = TREE_TYPE (f);
2009 if (RECORD_OR_UNION_TYPE_P (field_type)
2010 && record_or_union_type_has_array_p (field_type))
2011 return 1;
2012 if (TREE_CODE (field_type) == ARRAY_TYPE)
2013 return 1;
2014 }
2015 return 0;
2016 }
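
/* For example, a local declared as

     struct s { int n; char name[16]; } v;

   makes the helper above return 1, since one of its fields is an array;
   nested records and unions are searched recursively as well.  */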
2017
2018 /* Check if the current function has local referenced variables that
2019 have their addresses taken, contain an array, or are arrays. */
2020
2021 static bool
2022 stack_protect_decl_p ()
2023 {
2024 unsigned i;
2025 tree var;
2026
2027 FOR_EACH_LOCAL_DECL (cfun, i, var)
2028 if (!is_global_var (var))
2029 {
2030 tree var_type = TREE_TYPE (var);
2031 if (VAR_P (var)
2032 && (TREE_CODE (var_type) == ARRAY_TYPE
2033 || TREE_ADDRESSABLE (var)
2034 || (RECORD_OR_UNION_TYPE_P (var_type)
2035 && record_or_union_type_has_array_p (var_type))))
2036 return true;
2037 }
2038 return false;
2039 }
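
/* Illustrative locals that make stack_protect_decl_p return true: a
   plain array such as "char buf[32]", a record containing an array such
   as "struct s { char b[8]; } v", or any local whose address is taken
   (TREE_ADDRESSABLE).  */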
2040
2041 /* Check if the current function has calls that use a return slot. */
2042
2043 static bool
2044 stack_protect_return_slot_p ()
2045 {
2046 basic_block bb;
2047
2048 FOR_ALL_BB_FN (bb, cfun)
2049 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
2050 !gsi_end_p (gsi); gsi_next (&gsi))
2051 {
2052 gimple *stmt = gsi_stmt (gsi);
2053 /* This assumes that calls to internal-only functions never
2054 use a return slot. */
2055 if (is_gimple_call (stmt)
2056 && !gimple_call_internal_p (stmt)
2057 && aggregate_value_p (TREE_TYPE (gimple_call_fntype (stmt)),
2058 gimple_call_fndecl (stmt)))
2059 return true;
2060 }
2061 return false;
2062 }
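
/* For example, a call such as

     struct big { char data[128]; };
     struct big b = make_big ();

   typically returns its value in memory, so aggregate_value_p is true
   and the loop above reports a return slot; the exact threshold is
   target-ABI dependent.  */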
2063
2064 /* Expand all variables used in the function. */
2065
2066 static rtx_insn *
2067 expand_used_vars (void)
2068 {
2069 tree var, outer_block = DECL_INITIAL (current_function_decl);
2070 auto_vec<tree> maybe_local_decls;
2071 rtx_insn *var_end_seq = NULL;
2072 unsigned i;
2073 unsigned len;
2074 bool gen_stack_protect_signal = false;
2075
2076 /* Compute the phase of the stack frame for this function. */
2077 {
2078 int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
2079 int off = targetm.starting_frame_offset () % align;
2080 frame_phase = off ? align - off : 0;
2081 }
2082
2083 /* Set TREE_USED on all variables in the local_decls. */
2084 FOR_EACH_LOCAL_DECL (cfun, i, var)
2085 TREE_USED (var) = 1;
2086 /* Clear TREE_USED on all variables associated with a block scope. */
2087 clear_tree_used (DECL_INITIAL (current_function_decl));
2088
2089 init_vars_expansion ();
2090
2091 if (targetm.use_pseudo_pic_reg ())
2092 pic_offset_table_rtx = gen_reg_rtx (Pmode);
2093
2094 for (i = 0; i < SA.map->num_partitions; i++)
2095 {
2096 if (bitmap_bit_p (SA.partitions_for_parm_default_defs, i))
2097 continue;
2098
2099 tree var = partition_to_var (SA.map, i);
2100
2101 gcc_assert (!virtual_operand_p (var));
2102
2103 expand_one_ssa_partition (var);
2104 }
2105
2106 if (flag_stack_protect == SPCT_FLAG_STRONG)
2107 gen_stack_protect_signal
2108 = stack_protect_decl_p () || stack_protect_return_slot_p ();
2109
2110 /* At this point all variables in local_decls with TREE_USED
2111 set are not associated with any block scope. Lay them out. */
2112
2113 len = vec_safe_length (cfun->local_decls);
2114 FOR_EACH_LOCAL_DECL (cfun, i, var)
2115 {
2116 bool expand_now = false;
2117
2118 /* Expanded above already. */
2119 if (is_gimple_reg (var))
2120 {
2121 TREE_USED (var) = 0;
2122 goto next;
2123 }
2124 /* We didn't set a block for static or extern because it's hard
2125 to tell the difference between a global variable (re)declared
2126 in a local scope, and one that's really declared there to
2127 begin with. And it doesn't really matter much, since we're
2128 not giving them stack space. Expand them now. */
2129 else if (TREE_STATIC (var) || DECL_EXTERNAL (var))
2130 expand_now = true;
2131
2132 /* Expand variables not associated with any block now. Those created by
2133 the optimizers could be live anywhere in the function. Those that
2134 could possibly have been scoped originally and detached from their
2135 block will have their allocation deferred so we coalesce them with
2136 others when optimization is enabled. */
2137 else if (TREE_USED (var))
2138 expand_now = true;
2139
2140 /* Finally, mark all variables on the list as used. We'll use
2141 this in a moment when we expand those associated with scopes. */
2142 TREE_USED (var) = 1;
2143
2144 if (expand_now)
2145 expand_one_var (var, true, true);
2146
2147 next:
2148 if (DECL_ARTIFICIAL (var) && !DECL_IGNORED_P (var))
2149 {
2150 rtx rtl = DECL_RTL_IF_SET (var);
2151
2152 /* Keep artificial non-ignored vars in cfun->local_decls
2153 chain until instantiate_decls. */
2154 if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT))
2155 add_local_decl (cfun, var);
2156 else if (rtl == NULL_RTX)
2157 /* If rtl isn't set yet, which can happen e.g. with
2158 -fstack-protector, retry before returning from this
2159 function. */
2160 maybe_local_decls.safe_push (var);
2161 }
2162 }
2163
2164 /* We duplicated some of the decls in CFUN->LOCAL_DECLS.
2165
2166 +-----------------+-----------------+
2167 | ...processed... | ...duplicates...|
2168 +-----------------+-----------------+
2169 ^
2170 +-- LEN points here.
2171
2172 We just want the duplicates, as those are the artificial
2173 non-ignored vars that we want to keep until instantiate_decls.
2174 Move them down and truncate the array. */
2175 if (!vec_safe_is_empty (cfun->local_decls))
2176 cfun->local_decls->block_remove (0, len);
2177
2178 /* At this point, all variables within the block tree with TREE_USED
2179 set are actually used by the optimized function. Lay them out. */
2180 expand_used_vars_for_block (outer_block, true);
2181
2182 if (stack_vars_num > 0)
2183 {
2184 add_scope_conflicts ();
2185
2186 /* If stack protection is enabled, we don't share space between
2187 vulnerable data and non-vulnerable data. */
2188 if (flag_stack_protect != 0
2189 && (flag_stack_protect != SPCT_FLAG_EXPLICIT
2190 || (flag_stack_protect == SPCT_FLAG_EXPLICIT
2191 && lookup_attribute ("stack_protect",
2192 DECL_ATTRIBUTES (current_function_decl)))))
2193 add_stack_protection_conflicts ();
2194
2195 /* Now that we have collected all stack variables, and have computed a
2196 minimal interference graph, attempt to save some stack space. */
2197 partition_stack_vars ();
2198 if (dump_file)
2199 dump_stack_var_partition ();
2200 }
2201
2202 switch (flag_stack_protect)
2203 {
2204 case SPCT_FLAG_ALL:
2205 create_stack_guard ();
2206 break;
2207
2208 case SPCT_FLAG_STRONG:
2209 if (gen_stack_protect_signal
2210 || cfun->calls_alloca || has_protected_decls
2211 || lookup_attribute ("stack_protect",
2212 DECL_ATTRIBUTES (current_function_decl)))
2213 create_stack_guard ();
2214 break;
2215
2216 case SPCT_FLAG_DEFAULT:
2217 if (cfun->calls_alloca || has_protected_decls
2218 || lookup_attribute ("stack_protect",
2219 DECL_ATTRIBUTES (current_function_decl)))
2220 create_stack_guard ();
2221 break;
2222
2223 case SPCT_FLAG_EXPLICIT:
2224 if (lookup_attribute ("stack_protect",
2225 DECL_ATTRIBUTES (current_function_decl)))
2226 create_stack_guard ();
2227 break;
2228 default:
2229 ;
2230 }
2231
2232 /* Assign rtl to each variable based on these partitions. */
2233 if (stack_vars_num > 0)
2234 {
2235 class stack_vars_data data;
2236
2237 data.asan_base = NULL_RTX;
2238 data.asan_alignb = 0;
2239
2240 /* Reorder decls to be protected by iterating over the variables
2241 array multiple times, and allocating out of each phase in turn. */
2242 /* ??? We could probably integrate this into the qsort we did
2243 earlier, such that we naturally see these variables first,
2244 and thus naturally allocate things in the right order. */
2245 if (has_protected_decls)
2246 {
2247 /* Phase 1 contains only character arrays. */
2248 expand_stack_vars (stack_protect_decl_phase_1, &data);
2249
2250 /* Phase 2 contains other kinds of arrays. */
2251 if (flag_stack_protect == SPCT_FLAG_ALL
2252 || flag_stack_protect == SPCT_FLAG_STRONG
2253 || (flag_stack_protect == SPCT_FLAG_EXPLICIT
2254 && lookup_attribute ("stack_protect",
2255 DECL_ATTRIBUTES (current_function_decl))))
2256 expand_stack_vars (stack_protect_decl_phase_2, &data);
2257 }
2258
2259 if (asan_sanitize_stack_p ())
2260 /* Phase 3, any partitions that need asan protection
2261 in addition to phase 1 and 2. */
2262 expand_stack_vars (asan_decl_phase_3, &data);
2263
2264 /* ASAN description strings don't yet have a syntax for expressing
2265 polynomial offsets. */
2266 HOST_WIDE_INT prev_offset;
2267 if (!data.asan_vec.is_empty ()
2268 && frame_offset.is_constant (&prev_offset))
2269 {
2270 HOST_WIDE_INT offset, sz, redzonesz;
2271 redzonesz = ASAN_RED_ZONE_SIZE;
2272 sz = data.asan_vec[0] - prev_offset;
2273 if (data.asan_alignb > ASAN_RED_ZONE_SIZE
2274 && data.asan_alignb <= 4096
2275 && sz + ASAN_RED_ZONE_SIZE >= (int) data.asan_alignb)
2276 redzonesz = ((sz + ASAN_RED_ZONE_SIZE + data.asan_alignb - 1)
2277 & ~(data.asan_alignb - HOST_WIDE_INT_1)) - sz;
2278 /* Allocating a constant amount of space from a constant
2279 starting offset must give a constant result. */
2280 offset = (alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE)
2281 .to_constant ());
2282 data.asan_vec.safe_push (prev_offset);
2283 data.asan_vec.safe_push (offset);
2284 /* Leave space for alignment if STRICT_ALIGNMENT. */
2285 if (STRICT_ALIGNMENT)
2286 alloc_stack_frame_space ((GET_MODE_ALIGNMENT (SImode)
2287 << ASAN_SHADOW_SHIFT)
2288 / BITS_PER_UNIT, 1);
2289
2290 var_end_seq
2291 = asan_emit_stack_protection (virtual_stack_vars_rtx,
2292 data.asan_base,
2293 data.asan_alignb,
2294 data.asan_vec.address (),
2295 data.asan_decl_vec.address (),
2296 data.asan_vec.length ());
2297 }
2298
2299 expand_stack_vars (NULL, &data);
2300 }
2301
2302 if (asan_sanitize_allocas_p () && cfun->calls_alloca)
2303 var_end_seq = asan_emit_allocas_unpoison (virtual_stack_dynamic_rtx,
2304 virtual_stack_vars_rtx,
2305 var_end_seq);
2306
2307 fini_vars_expansion ();
2308
2309 /* If there were any artificial non-ignored vars without rtl
2310 found earlier, see if deferred stack allocation hasn't assigned
2311 rtl to them. */
2312 FOR_EACH_VEC_ELT_REVERSE (maybe_local_decls, i, var)
2313 {
2314 rtx rtl = DECL_RTL_IF_SET (var);
2315
2316 /* Keep artificial non-ignored vars in cfun->local_decls
2317 chain until instantiate_decls. */
2318 if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT))
2319 add_local_decl (cfun, var);
2320 }
2321
2322 /* If the target requires that FRAME_OFFSET be aligned, do it. */
2323 if (STACK_ALIGNMENT_NEEDED)
2324 {
2325 HOST_WIDE_INT align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
2326 if (FRAME_GROWS_DOWNWARD)
2327 frame_offset = aligned_lower_bound (frame_offset, align);
2328 else
2329 frame_offset = aligned_upper_bound (frame_offset, align);
2330 }
2331
2332 return var_end_seq;
2333 }
2334
2335
2336 /* If we need to produce a detailed dump, print the tree representation
2337 for STMT to the dump file. SINCE is the last RTX after which the RTL
2338 generated for STMT should have been appended. */
2339
2340 static void
2341 maybe_dump_rtl_for_gimple_stmt (gimple *stmt, rtx_insn *since)
2342 {
2343 if (dump_file && (dump_flags & TDF_DETAILS))
2344 {
2345 fprintf (dump_file, "\n;; ");
2346 print_gimple_stmt (dump_file, stmt, 0,
2347 TDF_SLIM | (dump_flags & TDF_LINENO));
2348 fprintf (dump_file, "\n");
2349
2350 print_rtl (dump_file, since ? NEXT_INSN (since) : since);
2351 }
2352 }
2353
2354 /* Maps the blocks that do not contain tree labels to rtx labels. */
2355
2356 static hash_map<basic_block, rtx_code_label *> *lab_rtx_for_bb;
2357
2358 /* Returns the label_rtx expression for a label starting basic block BB. */
2359
2360 static rtx_code_label *
2361 label_rtx_for_bb (basic_block bb ATTRIBUTE_UNUSED)
2362 {
2363 gimple_stmt_iterator gsi;
2364 tree lab;
2365
2366 if (bb->flags & BB_RTL)
2367 return block_label (bb);
2368
2369 rtx_code_label **elt = lab_rtx_for_bb->get (bb);
2370 if (elt)
2371 return *elt;
2372
2373 /* Find the tree label if it is present. */
2374
2375 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2376 {
2377 glabel *lab_stmt;
2378
2379 lab_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
2380 if (!lab_stmt)
2381 break;
2382
2383 lab = gimple_label_label (lab_stmt);
2384 if (DECL_NONLOCAL (lab))
2385 break;
2386
2387 return jump_target_rtx (lab);
2388 }
2389
2390 rtx_code_label *l = gen_label_rtx ();
2391 lab_rtx_for_bb->put (bb, l);
2392 return l;
2393 }
2394
2395
2396 /* A subroutine of expand_gimple_cond. Given E, a fallthrough edge
2397 of a basic block where we just expanded the conditional at the end,
2398 possibly clean up the CFG and instruction sequence. LAST is the
2399 last instruction before the just emitted jump sequence. */
2400
2401 static void
2402 maybe_cleanup_end_of_block (edge e, rtx_insn *last)
2403 {
2404 /* Special case: when jumpif decides that the condition is
2405 trivial it emits an unconditional jump (and the necessary
2406 barrier). But we still have two edges, the fallthru one is
2407 wrong. purge_dead_edges would clean this up later. Unfortunately
2408 we have to insert insns (and split edges) before
2409 find_many_sub_basic_blocks and hence before purge_dead_edges.
2410 But splitting edges might create new blocks which depend on the
2411 fact that if there are two edges there's no barrier. So the
2412 barrier would get lost and verify_flow_info would ICE. Instead
2413 of auditing all edge splitters to care for the barrier (which
2414 normally isn't there in a cleaned CFG), fix it here. */
2415 if (BARRIER_P (get_last_insn ()))
2416 {
2417 rtx_insn *insn;
2418 remove_edge (e);
2419 /* Now, we have a single successor block, if we have insns to
2420 insert on the remaining edge we potentially will insert
2421 it at the end of this block (if the dest block isn't feasible)
2422 in order to avoid splitting the edge. This insertion will take
2423 place in front of the last jump. But we might have emitted
2424 multiple jumps (conditional and one unconditional) to the
2425 same destination. Inserting in front of the last one then
2426 is a problem. See PR 40021. We fix this by deleting all
2427 jumps except the last unconditional one. */
2428 insn = PREV_INSN (get_last_insn ());
2429 /* Make sure we have an unconditional jump. Otherwise we're
2430 confused. */
2431 gcc_assert (JUMP_P (insn) && !any_condjump_p (insn));
2432 for (insn = PREV_INSN (insn); insn != last;)
2433 {
2434 insn = PREV_INSN (insn);
2435 if (JUMP_P (NEXT_INSN (insn)))
2436 {
2437 if (!any_condjump_p (NEXT_INSN (insn)))
2438 {
2439 gcc_assert (BARRIER_P (NEXT_INSN (NEXT_INSN (insn))));
2440 delete_insn (NEXT_INSN (NEXT_INSN (insn)));
2441 }
2442 delete_insn (NEXT_INSN (insn));
2443 }
2444 }
2445 }
2446 }
2447
2448 /* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_COND.
2449 Returns a new basic block if we've terminated the current basic
2450 block and created a new one. */
2451
2452 static basic_block
2453 expand_gimple_cond (basic_block bb, gcond *stmt)
2454 {
2455 basic_block new_bb, dest;
2456 edge true_edge;
2457 edge false_edge;
2458 rtx_insn *last2, *last;
2459 enum tree_code code;
2460 tree op0, op1;
2461
2462 code = gimple_cond_code (stmt);
2463 op0 = gimple_cond_lhs (stmt);
2464 op1 = gimple_cond_rhs (stmt);
2465 /* We're sometimes presented with such code:
2466 D.123_1 = x < y;
2467 if (D.123_1 != 0)
2468 ...
2469 This would expand to two comparisons which then later might
2470 be cleaned up by combine. But some pattern matchers like if-conversion
2471 work better when there's only one compare, so make up for this
2472 here as special exception if TER would have made the same change. */
2473 if (SA.values
2474 && TREE_CODE (op0) == SSA_NAME
2475 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
2476 && TREE_CODE (op1) == INTEGER_CST
2477 && ((gimple_cond_code (stmt) == NE_EXPR
2478 && integer_zerop (op1))
2479 || (gimple_cond_code (stmt) == EQ_EXPR
2480 && integer_onep (op1)))
2481 && bitmap_bit_p (SA.values, SSA_NAME_VERSION (op0)))
2482 {
2483 gimple *second = SSA_NAME_DEF_STMT (op0);
2484 if (gimple_code (second) == GIMPLE_ASSIGN)
2485 {
2486 enum tree_code code2 = gimple_assign_rhs_code (second);
2487 if (TREE_CODE_CLASS (code2) == tcc_comparison)
2488 {
2489 code = code2;
2490 op0 = gimple_assign_rhs1 (second);
2491 op1 = gimple_assign_rhs2 (second);
2492 }
2493 /* If jumps are cheap and the target does not support conditional
2494 compare, turn some more codes into jumpy sequences. */
2495 else if (BRANCH_COST (optimize_insn_for_speed_p (), false) < 4
2496 && targetm.gen_ccmp_first == NULL)
2497 {
2498 if ((code2 == BIT_AND_EXPR
2499 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
2500 && TREE_CODE (gimple_assign_rhs2 (second)) != INTEGER_CST)
2501 || code2 == TRUTH_AND_EXPR)
2502 {
2503 code = TRUTH_ANDIF_EXPR;
2504 op0 = gimple_assign_rhs1 (second);
2505 op1 = gimple_assign_rhs2 (second);
2506 }
2507 else if (code2 == BIT_IOR_EXPR || code2 == TRUTH_OR_EXPR)
2508 {
2509 code = TRUTH_ORIF_EXPR;
2510 op0 = gimple_assign_rhs1 (second);
2511 op1 = gimple_assign_rhs2 (second);
2512 }
2513 }
2514 }
2515 }
2516
2517 /* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
2518 into (x - C2) * C3 < C4. */
2519 if ((code == EQ_EXPR || code == NE_EXPR)
2520 && TREE_CODE (op0) == SSA_NAME
2521 && TREE_CODE (op1) == INTEGER_CST)
2522 code = maybe_optimize_mod_cmp (code, &op0, &op1);
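
/* For instance, with 32-bit unsigned x, "x % 3 == 0" can be rewritten
   as "x * 0xAAAAAAABu <= 0x55555555u", where 0xAAAAAAAB is the modular
   inverse of 3 and 0x55555555 is (2^32 - 1) / 3; the division becomes a
   multiply and compare. This is only an illustration -- see
   maybe_optimize_mod_cmp for the exact conditions. */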
2523
2524 last2 = last = get_last_insn ();
2525
2526 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2527 set_curr_insn_location (gimple_location (stmt));
2528
2529 /* These flags have no purpose in RTL land. */
2530 true_edge->flags &= ~EDGE_TRUE_VALUE;
2531 false_edge->flags &= ~EDGE_FALSE_VALUE;
2532
2533 /* We can either have a pure conditional jump with one fallthru edge or
2534 two-way jump that needs to be decomposed into two basic blocks. */
2535 if (false_edge->dest == bb->next_bb)
2536 {
2537 jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest),
2538 true_edge->probability);
2539 maybe_dump_rtl_for_gimple_stmt (stmt, last);
2540 if (true_edge->goto_locus != UNKNOWN_LOCATION)
2541 set_curr_insn_location (true_edge->goto_locus);
2542 false_edge->flags |= EDGE_FALLTHRU;
2543 maybe_cleanup_end_of_block (false_edge, last);
2544 return NULL;
2545 }
2546 if (true_edge->dest == bb->next_bb)
2547 {
2548 jumpifnot_1 (code, op0, op1, label_rtx_for_bb (false_edge->dest),
2549 false_edge->probability);
2550 maybe_dump_rtl_for_gimple_stmt (stmt, last);
2551 if (false_edge->goto_locus != UNKNOWN_LOCATION)
2552 set_curr_insn_location (false_edge->goto_locus);
2553 true_edge->flags |= EDGE_FALLTHRU;
2554 maybe_cleanup_end_of_block (true_edge, last);
2555 return NULL;
2556 }
2557
2558 jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest),
2559 true_edge->probability);
2560 last = get_last_insn ();
2561 if (false_edge->goto_locus != UNKNOWN_LOCATION)
2562 set_curr_insn_location (false_edge->goto_locus);
2563 emit_jump (label_rtx_for_bb (false_edge->dest));
2564
2565 BB_END (bb) = last;
2566 if (BARRIER_P (BB_END (bb)))
2567 BB_END (bb) = PREV_INSN (BB_END (bb));
2568 update_bb_for_insn (bb);
2569
2570 new_bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);
2571 dest = false_edge->dest;
2572 redirect_edge_succ (false_edge, new_bb);
2573 false_edge->flags |= EDGE_FALLTHRU;
2574 new_bb->count = false_edge->count ();
2575 loop_p loop = find_common_loop (bb->loop_father, dest->loop_father);
2576 add_bb_to_loop (new_bb, loop);
2577 if (loop->latch == bb
2578 && loop->header == dest)
2579 loop->latch = new_bb;
2580 make_single_succ_edge (new_bb, dest, 0);
2581 if (BARRIER_P (BB_END (new_bb)))
2582 BB_END (new_bb) = PREV_INSN (BB_END (new_bb));
2583 update_bb_for_insn (new_bb);
2584
2585 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
2586
2587 if (true_edge->goto_locus != UNKNOWN_LOCATION)
2588 {
2589 set_curr_insn_location (true_edge->goto_locus);
2590 true_edge->goto_locus = curr_insn_location ();
2591 }
2592
2593 return new_bb;
2594 }
2595
2596 /* Mark all calls that can have a transaction restart. */
2597
2598 static void
2599 mark_transaction_restart_calls (gimple *stmt)
2600 {
2601 struct tm_restart_node dummy;
2602 tm_restart_node **slot;
2603
2604 if (!cfun->gimple_df->tm_restart)
2605 return;
2606
2607 dummy.stmt = stmt;
2608 slot = cfun->gimple_df->tm_restart->find_slot (&dummy, NO_INSERT);
2609 if (slot)
2610 {
2611 struct tm_restart_node *n = *slot;
2612 tree list = n->label_or_list;
2613 rtx_insn *insn;
2614
2615 for (insn = next_real_insn (get_last_insn ());
2616 !CALL_P (insn);
2617 insn = next_real_insn (insn))
2618 continue;
2619
2620 if (TREE_CODE (list) == LABEL_DECL)
2621 add_reg_note (insn, REG_TM, label_rtx (list));
2622 else
2623 for (; list ; list = TREE_CHAIN (list))
2624 add_reg_note (insn, REG_TM, label_rtx (TREE_VALUE (list)));
2625 }
2626 }
2627
2628 /* A subroutine of expand_gimple_stmt_1, expanding one GIMPLE_CALL
2629 statement STMT. */
2630
2631 static void
2632 expand_call_stmt (gcall *stmt)
2633 {
2634 tree exp, decl, lhs;
2635 bool builtin_p;
2636 size_t i;
2637
2638 if (gimple_call_internal_p (stmt))
2639 {
2640 expand_internal_call (stmt);
2641 return;
2642 }
2643
2644 /* If this is a call to a built-in function and it has no effect other
2645 than setting the lhs, try to implement it using an internal function
2646 instead. */
2647 decl = gimple_call_fndecl (stmt);
2648 if (gimple_call_lhs (stmt)
2649 && !gimple_has_side_effects (stmt)
2650 && (optimize || (decl && called_as_built_in (decl))))
2651 {
2652 internal_fn ifn = replacement_internal_fn (stmt);
2653 if (ifn != IFN_LAST)
2654 {
2655 expand_internal_call (ifn, stmt);
2656 return;
2657 }
2658 }
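
/* For example (assuming the target supports the corresponding optab), a
   statement such as "x = __builtin_popcount (y);" can be expanded via
   IFN_POPCOUNT here instead of emitting a call.  */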
2659
2660 exp = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3);
2661
2662 CALL_EXPR_FN (exp) = gimple_call_fn (stmt);
2663 builtin_p = decl && fndecl_built_in_p (decl);
2664
2665 /* If this is not a builtin function, the function type through which the
2666 call is made may be different from the type of the function. */
2667 if (!builtin_p)
2668 CALL_EXPR_FN (exp)
2669 = fold_convert (build_pointer_type (gimple_call_fntype (stmt)),
2670 CALL_EXPR_FN (exp));
2671
2672 TREE_TYPE (exp) = gimple_call_return_type (stmt);
2673 CALL_EXPR_STATIC_CHAIN (exp) = gimple_call_chain (stmt);
2674
2675 for (i = 0; i < gimple_call_num_args (stmt); i++)
2676 {
2677 tree arg = gimple_call_arg (stmt, i);
2678 gimple *def;
2679 /* Forward ADDR_EXPRs from TER into the arguments of builtin functions so
2680 we have a chance to infer more correct alignment information. See PR39954. */
2681 if (builtin_p
2682 && TREE_CODE (arg) == SSA_NAME
2683 && (def = get_gimple_for_ssa_name (arg))
2684 && gimple_assign_rhs_code (def) == ADDR_EXPR)
2685 arg = gimple_assign_rhs1 (def);
2686 CALL_EXPR_ARG (exp, i) = arg;
2687 }
2688
2689 if (gimple_has_side_effects (stmt))
2690 TREE_SIDE_EFFECTS (exp) = 1;
2691
2692 if (gimple_call_nothrow_p (stmt))
2693 TREE_NOTHROW (exp) = 1;
2694
2695 if (gimple_no_warning_p (stmt))
2696 TREE_NO_WARNING (exp) = 1;
2697
2698 CALL_EXPR_TAILCALL (exp) = gimple_call_tail_p (stmt);
2699 CALL_EXPR_MUST_TAIL_CALL (exp) = gimple_call_must_tail_p (stmt);
2700 CALL_EXPR_RETURN_SLOT_OPT (exp) = gimple_call_return_slot_opt_p (stmt);
2701 if (decl
2702 && fndecl_built_in_p (decl, BUILT_IN_NORMAL)
2703 && ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (decl)))
2704 CALL_ALLOCA_FOR_VAR_P (exp) = gimple_call_alloca_for_var_p (stmt);
2705 else
2706 CALL_FROM_THUNK_P (exp) = gimple_call_from_thunk_p (stmt);
2707 CALL_EXPR_VA_ARG_PACK (exp) = gimple_call_va_arg_pack_p (stmt);
2708 CALL_EXPR_BY_DESCRIPTOR (exp) = gimple_call_by_descriptor_p (stmt);
2709 SET_EXPR_LOCATION (exp, gimple_location (stmt));
2710
2711 /* Ensure RTL is created for debug args. */
2712 if (decl && DECL_HAS_DEBUG_ARGS_P (decl))
2713 {
2714 vec<tree, va_gc> **debug_args = decl_debug_args_lookup (decl);
2715 unsigned int ix;
2716 tree dtemp;
2717
2718 if (debug_args)
2719 for (ix = 1; (*debug_args)->iterate (ix, &dtemp); ix += 2)
2720 {
2721 gcc_assert (TREE_CODE (dtemp) == DEBUG_EXPR_DECL);
2722 expand_debug_expr (dtemp);
2723 }
2724 }
2725
2726 rtx_insn *before_call = get_last_insn ();
2727 lhs = gimple_call_lhs (stmt);
2728 if (lhs)
2729 expand_assignment (lhs, exp, false);
2730 else
2731 expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
2732
2733 /* If the gimple call is an indirect call and has 'nocf_check'
2734 attribute find a generated CALL insn to mark it as no
2735 control-flow verification is needed. */
2736 if (gimple_call_nocf_check_p (stmt)
2737 && !gimple_call_fndecl (stmt))
2738 {
2739 rtx_insn *last = get_last_insn ();
2740 while (!CALL_P (last)
2741 && last != before_call)
2742 last = PREV_INSN (last);
2743
2744 if (last != before_call)
2745 add_reg_note (last, REG_CALL_NOCF_CHECK, const0_rtx);
2746 }
2747
2748 mark_transaction_restart_calls (stmt);
2749 }
2750
2751
2752 /* Generate RTL for an asm statement (explicit assembler code).
2753 STRING is a STRING_CST node containing the assembler code text,
2754 or an ADDR_EXPR containing a STRING_CST. VOL nonzero means the
2755 insn is volatile; don't optimize it. */
2756
2757 static void
2758 expand_asm_loc (tree string, int vol, location_t locus)
2759 {
2760 rtx body;
2761
2762 body = gen_rtx_ASM_INPUT_loc (VOIDmode,
2763 ggc_strdup (TREE_STRING_POINTER (string)),
2764 locus);
2765
2766 MEM_VOLATILE_P (body) = vol;
2767
2768 /* Non-empty basic ASM implicitly clobbers memory. */
2769 if (TREE_STRING_LENGTH (string) != 0)
2770 {
2771 rtx asm_op, clob;
2772 unsigned i, nclobbers;
2773 auto_vec<rtx> input_rvec, output_rvec;
2774 auto_vec<const char *> constraints;
2775 auto_vec<rtx> clobber_rvec;
2776 HARD_REG_SET clobbered_regs;
2777 CLEAR_HARD_REG_SET (clobbered_regs);
2778
2779 clob = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
2780 clobber_rvec.safe_push (clob);
2781
2782 if (targetm.md_asm_adjust)
2783 targetm.md_asm_adjust (output_rvec, input_rvec,
2784 constraints, clobber_rvec,
2785 clobbered_regs);
2786
2787 asm_op = body;
2788 nclobbers = clobber_rvec.length ();
2789 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (1 + nclobbers));
2790
2791 XVECEXP (body, 0, 0) = asm_op;
2792 for (i = 0; i < nclobbers; i++)
2793 XVECEXP (body, 0, i + 1) = gen_rtx_CLOBBER (VOIDmode, clobber_rvec[i]);
2794 }
2795
2796 emit_insn (body);
2797 }
2798
2799 /* Return the number of times character C occurs in string S. */
2800 static int
2801 n_occurrences (int c, const char *s)
2802 {
2803 int n = 0;
2804 while (*s)
2805 n += (*s++ == c);
2806 return n;
2807 }
2808
2809 /* A subroutine of expand_asm_operands. Check that all operands have
2810 the same number of alternatives. Return true if so. */
2811
2812 static bool
2813 check_operand_nalternatives (const vec<const char *> &constraints)
2814 {
2815 unsigned len = constraints.length();
2816 if (len > 0)
2817 {
2818 int nalternatives = n_occurrences (',', constraints[0]);
2819
2820 if (nalternatives + 1 > MAX_RECOG_ALTERNATIVES)
2821 {
2822 error ("too many alternatives in %<asm%>");
2823 return false;
2824 }
2825
2826 for (unsigned i = 1; i < len; ++i)
2827 if (n_occurrences (',', constraints[i]) != nalternatives)
2828 {
2829 error ("operand constraints for %<asm%> differ "
2830 "in number of alternatives");
2831 return false;
2832 }
2833 }
2834 return true;
2835 }
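
/* For example, in

     asm ("..." : "=r,m" (x) : "r,r" (y));

   each constraint names two alternatives, so the check above succeeds;
   mixing "=r,m" with a plain "r" would be diagnosed.  */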
2836
2837 /* Check for overlap between registers marked in CLOBBERED_REGS and
2838 anything inappropriate in T. Emit an error and return true on
2839 conflict, false if everything is OK. */
2840
2841 static bool
2842 tree_conflicts_with_clobbers_p (tree t, HARD_REG_SET *clobbered_regs)
2843 {
2844 /* Conflicts between asm-declared register variables and the clobber
2845 list are not allowed. */
2846 tree overlap = tree_overlaps_hard_reg_set (t, clobbered_regs);
2847
2848 if (overlap)
2849 {
2850 error ("%<asm%> specifier for variable %qE conflicts with "
2851 "%<asm%> clobber list",
2852 DECL_NAME (overlap));
2853
2854 /* Reset registerness to stop multiple errors emitted for a single
2855 variable. */
2856 DECL_REGISTER (overlap) = 0;
2857 return true;
2858 }
2859
2860 return false;
2861 }
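
/* For instance (with a hypothetical register name), the combination

     register int x asm ("r10");
     asm ("..." : "+r" (x) : : "r10");

   is diagnosed here, because the asm-declared register of X overlaps
   the clobber list.  */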
2862
2863 /* Check that the given REGNO spanning NREGS is a valid
2864 asm clobber operand. Some HW registers cannot be
2865 saved/restored, hence they should not be clobbered by
2866 asm statements. */
2867 static bool
2868 asm_clobber_reg_is_valid (int regno, int nregs, const char *regname)
2869 {
2870 bool is_valid = true;
2871 HARD_REG_SET regset;
2872
2873 CLEAR_HARD_REG_SET (regset);
2874
2875 add_range_to_hard_reg_set (&regset, regno, nregs);
2876
2877 /* Clobbering the PIC register is an error. */
2878 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
2879 && overlaps_hard_reg_set_p (regset, Pmode, PIC_OFFSET_TABLE_REGNUM))
2880 {
2881 /* ??? Diagnose during gimplification? */
2882 error ("PIC register clobbered by %qs in %<asm%>", regname);
2883 is_valid = false;
2884 }
2885 else if (!in_hard_reg_set_p
2886 (accessible_reg_set, reg_raw_mode[regno], regno))
2887 {
2888 /* ??? Diagnose during gimplification? */
2889 error ("the register %qs cannot be clobbered in %<asm%>"
2890 " for the current target", regname);
2891 is_valid = false;
2892 }
2893
2894 /* Clobbering the stack pointer register is deprecated. GCC expects
2895 the value of the stack pointer after an asm statement to be the same
2896 as it was before, so no asm can validly clobber the stack pointer in
2897 the usual sense. Adding the stack pointer to the clobber list has
2898 traditionally had some undocumented and somewhat obscure side-effects. */
2899 if (overlaps_hard_reg_set_p (regset, Pmode, STACK_POINTER_REGNUM)
2900 && warning (OPT_Wdeprecated, "listing the stack pointer register"
2901 " %qs in a clobber list is deprecated", regname))
2902 inform (input_location, "the value of the stack pointer after an %<asm%>"
2903 " statement must be the same as it was before the statement");
2904
2905 return is_valid;
2906 }
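
/* For example (register names are target specific), clobbering the PIC
   register is rejected outright, while something like

     asm volatile ("" : : : "sp");

   is accepted but triggers the -Wdeprecated warning about listing the
   stack pointer in a clobber list.  */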
2907
2908 /* Generate RTL for an asm statement with arguments.
2909 STRING is the instruction template.
2910 OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs.
2911 Each output or input has an expression in the TREE_VALUE and
2912 a tree list in TREE_PURPOSE which in turn contains a constraint
2913 name in TREE_VALUE (or NULL_TREE) and a constraint string
2914 in TREE_PURPOSE.
2915 CLOBBERS is a list of STRING_CST nodes each naming a hard register
2916 that is clobbered by this insn.
2917
2918 LABELS is a list of labels, and if LABELS is non-NULL, FALLTHRU_BB
2919 should be the fallthru basic block of the asm goto.
2920
2921 Not all kinds of lvalue that may appear in OUTPUTS can be stored directly.
2922 Some elements of OUTPUTS may be replaced with trees representing temporary
2923 values. The caller should copy those temporary values to the originally
2924 specified lvalues.
2925
2926 VOL nonzero means the insn is volatile; don't optimize it. */
2927
2928 static void
2929 expand_asm_stmt (gasm *stmt)
2930 {
2931 class save_input_location
2932 {
2933 location_t old;
2934
2935 public:
2936 explicit save_input_location(location_t where)
2937 {
2938 old = input_location;
2939 input_location = where;
2940 }
2941
2942 ~save_input_location()
2943 {
2944 input_location = old;
2945 }
2946 };
2947
2948 location_t locus = gimple_location (stmt);
2949
2950 if (gimple_asm_input_p (stmt))
2951 {
2952 const char *s = gimple_asm_string (stmt);
2953 tree string = build_string (strlen (s), s);
2954 expand_asm_loc (string, gimple_asm_volatile_p (stmt), locus);
2955 return;
2956 }
2957
2958 /* There are some legacy diagnostics in here, and this also avoids a
2959 sixth parameter to targetm.md_asm_adjust. */
2960 save_input_location s_i_l(locus);
2961
2962 unsigned noutputs = gimple_asm_noutputs (stmt);
2963 unsigned ninputs = gimple_asm_ninputs (stmt);
2964 unsigned nlabels = gimple_asm_nlabels (stmt);
2965 unsigned i;
2966
2967 /* ??? Diagnose during gimplification? */
2968 if (ninputs + noutputs + nlabels > MAX_RECOG_OPERANDS)
2969 {
2970 error ("more than %d operands in %<asm%>", MAX_RECOG_OPERANDS);
2971 return;
2972 }
2973
2974 auto_vec<tree, MAX_RECOG_OPERANDS> output_tvec;
2975 auto_vec<tree, MAX_RECOG_OPERANDS> input_tvec;
2976 auto_vec<const char *, MAX_RECOG_OPERANDS> constraints;
2977
2978 /* Copy the gimple vectors into new vectors that we can manipulate. */
2979
2980 output_tvec.safe_grow (noutputs);
2981 input_tvec.safe_grow (ninputs);
2982 constraints.safe_grow (noutputs + ninputs);
2983
2984 for (i = 0; i < noutputs; ++i)
2985 {
2986 tree t = gimple_asm_output_op (stmt, i);
2987 output_tvec[i] = TREE_VALUE (t);
2988 constraints[i] = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
2989 }
2990 for (i = 0; i < ninputs; i++)
2991 {
2992 tree t = gimple_asm_input_op (stmt, i);
2993 input_tvec[i] = TREE_VALUE (t);
2994 constraints[i + noutputs]
2995 = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
2996 }
2997
2998 /* ??? Diagnose during gimplification? */
2999 if (! check_operand_nalternatives (constraints))
3000 return;
3001
3002 /* Count the number of meaningful clobbered registers, ignoring what
3003 we would ignore later. */
3004 auto_vec<rtx> clobber_rvec;
3005 HARD_REG_SET clobbered_regs;
3006 CLEAR_HARD_REG_SET (clobbered_regs);
3007
3008 if (unsigned n = gimple_asm_nclobbers (stmt))
3009 {
3010 clobber_rvec.reserve (n);
3011 for (i = 0; i < n; i++)
3012 {
3013 tree t = gimple_asm_clobber_op (stmt, i);
3014 const char *regname = TREE_STRING_POINTER (TREE_VALUE (t));
3015 int nregs, j;
3016
3017 j = decode_reg_name_and_count (regname, &nregs);
3018 if (j < 0)
3019 {
3020 if (j == -2)
3021 {
3022 /* ??? Diagnose during gimplification? */
3023 error ("unknown register name %qs in %<asm%>", regname);
3024 }
3025 else if (j == -4)
3026 {
3027 rtx x = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
3028 clobber_rvec.safe_push (x);
3029 }
3030 else
3031 {
3032 /* Otherwise we should have -1 == empty string
3033 or -3 == cc, which is not a register. */
3034 gcc_assert (j == -1 || j == -3);
3035 }
3036 }
3037 else
3038 for (int reg = j; reg < j + nregs; reg++)
3039 {
3040 if (!asm_clobber_reg_is_valid (reg, nregs, regname))
3041 return;
3042
3043 SET_HARD_REG_BIT (clobbered_regs, reg);
3044 rtx x = gen_rtx_REG (reg_raw_mode[reg], reg);
3045 clobber_rvec.safe_push (x);
3046 }
3047 }
3048 }
3049
3050 /* First pass over inputs and outputs checks validity and sets
3051 mark_addressable if needed. */
3052 /* ??? Diagnose during gimplification? */
3053
3054 for (i = 0; i < noutputs; ++i)
3055 {
3056 tree val = output_tvec[i];
3057 tree type = TREE_TYPE (val);
3058 const char *constraint;
3059 bool is_inout;
3060 bool allows_reg;
3061 bool allows_mem;
3062
3063 /* Try to parse the output constraint. If that fails, there's
3064 no point in going further. */
3065 constraint = constraints[i];
3066 if (!parse_output_constraint (&constraint, i, ninputs, noutputs,
3067 &allows_mem, &allows_reg, &is_inout))
3068 return;
3069
3070 /* If the output is a hard register, verify it doesn't conflict with
3071 any other operand's possible hard register use. */
3072 if (DECL_P (val)
3073 && REG_P (DECL_RTL (val))
3074 && HARD_REGISTER_P (DECL_RTL (val)))
3075 {
3076 unsigned j, output_hregno = REGNO (DECL_RTL (val));
3077 bool early_clobber_p = strchr (constraints[i], '&') != NULL;
3078 unsigned long match;
3079
3080 /* Verify the other outputs do not use the same hard register. */
3081 for (j = i + 1; j < noutputs; ++j)
3082 if (DECL_P (output_tvec[j])
3083 && REG_P (DECL_RTL (output_tvec[j]))
3084 && HARD_REGISTER_P (DECL_RTL (output_tvec[j]))
3085 && output_hregno == REGNO (DECL_RTL (output_tvec[j])))
3086 error ("invalid hard register usage between output operands");
3087
3088 /* Verify matching constraint operands use the same hard register
3089 and that the non-matching constraint operands do not use the same
3090 hard register if the output is an early clobber operand. */
3091 for (j = 0; j < ninputs; ++j)
3092 if (DECL_P (input_tvec[j])
3093 && REG_P (DECL_RTL (input_tvec[j]))
3094 && HARD_REGISTER_P (DECL_RTL (input_tvec[j])))
3095 {
3096 unsigned input_hregno = REGNO (DECL_RTL (input_tvec[j]));
3097 switch (*constraints[j + noutputs])
3098 {
3099 case '0': case '1': case '2': case '3': case '4':
3100 case '5': case '6': case '7': case '8': case '9':
3101 match = strtoul (constraints[j + noutputs], NULL, 10);
3102 break;
3103 default:
3104 match = ULONG_MAX;
3105 break;
3106 }
3107 if (i == match
3108 && output_hregno != input_hregno)
3109 error ("invalid hard register usage between output operand "
3110 "and matching constraint operand");
3111 else if (early_clobber_p
3112 && i != match
3113 && output_hregno == input_hregno)
3114 error ("invalid hard register usage between earlyclobber "
3115 "operand and input operand");
3116 }
3117 }
3118
3119 if (! allows_reg
3120 && (allows_mem
3121 || is_inout
3122 || (DECL_P (val)
3123 && REG_P (DECL_RTL (val))
3124 && GET_MODE (DECL_RTL (val)) != TYPE_MODE (type))))
3125 mark_addressable (val);
3126 }
3127
3128 for (i = 0; i < ninputs; ++i)
3129 {
3130 bool allows_reg, allows_mem;
3131 const char *constraint;
3132
3133 constraint = constraints[i + noutputs];
3134 if (! parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
3135 constraints.address (),
3136 &allows_mem, &allows_reg))
3137 return;
3138
3139 if (! allows_reg && allows_mem)
3140 mark_addressable (input_tvec[i]);
3141 }
3142
3143 /* Second pass evaluates arguments. */
3144
3145 /* Make sure stack is consistent for asm goto. */
3146 if (nlabels > 0)
3147 do_pending_stack_adjust ();
3148 int old_generating_concat_p = generating_concat_p;
3149
3150 /* Vector of RTX's of evaluated output operands. */
3151 auto_vec<rtx, MAX_RECOG_OPERANDS> output_rvec;
3152 auto_vec<int, MAX_RECOG_OPERANDS> inout_opnum;
3153 rtx_insn *after_rtl_seq = NULL, *after_rtl_end = NULL;
3154
3155 output_rvec.safe_grow (noutputs);
3156
3157 for (i = 0; i < noutputs; ++i)
3158 {
3159 tree val = output_tvec[i];
3160 tree type = TREE_TYPE (val);
3161 bool is_inout, allows_reg, allows_mem, ok;
3162 rtx op;
3163
3164 ok = parse_output_constraint (&constraints[i], i, ninputs,
3165 noutputs, &allows_mem, &allows_reg,
3166 &is_inout);
3167 gcc_assert (ok);
3168
3169 /* If an output operand is not a decl or indirect ref and our constraint
3170 allows a register, make a temporary to act as an intermediate.
3171 Make the asm insn write into that, then we will copy it to
3172 the real output operand. Likewise for promoted variables. */
3173
3174 generating_concat_p = 0;
3175
3176 if ((TREE_CODE (val) == INDIRECT_REF && allows_mem)
3177 || (DECL_P (val)
3178 && (allows_mem || REG_P (DECL_RTL (val)))
3179 && ! (REG_P (DECL_RTL (val))
3180 && GET_MODE (DECL_RTL (val)) != TYPE_MODE (type)))
3181 || ! allows_reg
3182 || is_inout
3183 || TREE_ADDRESSABLE (type))
3184 {
3185 op = expand_expr (val, NULL_RTX, VOIDmode,
3186 !allows_reg ? EXPAND_MEMORY : EXPAND_WRITE);
3187 if (MEM_P (op))
3188 op = validize_mem (op);
3189
3190 if (! allows_reg && !MEM_P (op))
3191 error ("output number %d not directly addressable", i);
3192 if ((! allows_mem && MEM_P (op) && GET_MODE (op) != BLKmode)
3193 || GET_CODE (op) == CONCAT)
3194 {
3195 rtx old_op = op;
3196 op = gen_reg_rtx (GET_MODE (op));
3197
3198 generating_concat_p = old_generating_concat_p;
3199
3200 if (is_inout)
3201 emit_move_insn (op, old_op);
3202
3203 push_to_sequence2 (after_rtl_seq, after_rtl_end);
3204 emit_move_insn (old_op, op);
3205 after_rtl_seq = get_insns ();
3206 after_rtl_end = get_last_insn ();
3207 end_sequence ();
3208 }
3209 }
3210 else
3211 {
3212 op = assign_temp (type, 0, 1);
3213 op = validize_mem (op);
3214 if (!MEM_P (op) && TREE_CODE (val) == SSA_NAME)
3215 set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (val), op);
3216
3217 generating_concat_p = old_generating_concat_p;
3218
3219 push_to_sequence2 (after_rtl_seq, after_rtl_end);
3220 expand_assignment (val, make_tree (type, op), false);
3221 after_rtl_seq = get_insns ();
3222 after_rtl_end = get_last_insn ();
3223 end_sequence ();
3224 }
3225 output_rvec[i] = op;
3226
3227 if (is_inout)
3228 inout_opnum.safe_push (i);
3229 }
3230
3231 auto_vec<rtx, MAX_RECOG_OPERANDS> input_rvec;
3232 auto_vec<machine_mode, MAX_RECOG_OPERANDS> input_mode;
3233
3234 input_rvec.safe_grow (ninputs);
3235 input_mode.safe_grow (ninputs);
3236
3237 generating_concat_p = 0;
3238
3239 for (i = 0; i < ninputs; ++i)
3240 {
3241 tree val = input_tvec[i];
3242 tree type = TREE_TYPE (val);
3243 bool allows_reg, allows_mem, ok;
3244 const char *constraint;
3245 rtx op;
3246
3247 constraint = constraints[i + noutputs];
3248 ok = parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
3249 constraints.address (),
3250 &allows_mem, &allows_reg);
3251 gcc_assert (ok);
3252
3253 /* EXPAND_INITIALIZER will not generate code for valid initializer
3254 constants, but will still generate code for other types of operand.
3255 This is the behavior we want for constant constraints. */
3256 op = expand_expr (val, NULL_RTX, VOIDmode,
3257 allows_reg ? EXPAND_NORMAL
3258 : allows_mem ? EXPAND_MEMORY
3259 : EXPAND_INITIALIZER);
3260
3261 /* Never pass a CONCAT to an ASM. */
3262 if (GET_CODE (op) == CONCAT)
3263 op = force_reg (GET_MODE (op), op);
3264 else if (MEM_P (op))
3265 op = validize_mem (op);
3266
3267 if (asm_operand_ok (op, constraint, NULL) <= 0)
3268 {
3269 if (allows_reg && TYPE_MODE (type) != BLKmode)
3270 op = force_reg (TYPE_MODE (type), op);
3271 else if (!allows_mem)
3272 warning (0, "%<asm%> operand %d probably does not match "
3273 "constraints",
3274 i + noutputs);
3275 else if (MEM_P (op))
3276 {
3277 /* We won't recognize either volatile memory or memory
3278 with a queued address as a valid memory_operand
3279 at this point. Ignore it: clearly this *is* a memory. */
3280 }
3281 else
3282 gcc_unreachable ();
3283 }
3284 input_rvec[i] = op;
3285 input_mode[i] = TYPE_MODE (type);
3286 }
3287
3288 /* For in-out operands, copy output rtx to input rtx. */
3289 unsigned ninout = inout_opnum.length();
3290 for (i = 0; i < ninout; i++)
3291 {
3292 int j = inout_opnum[i];
3293 rtx o = output_rvec[j];
3294
3295 input_rvec.safe_push (o);
3296 input_mode.safe_push (GET_MODE (o));
3297
3298 char buffer[16];
3299 sprintf (buffer, "%d", j);
3300 constraints.safe_push (ggc_strdup (buffer));
3301 }
3302 ninputs += ninout;
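
/* For example, an in-out operand written as "+r" (x) at the source level
   is handled by the loop above as an extra input that reuses the
   output's rtx and carries the matching-operand constraint ("0", "1",
   ...), so the backend sees an ordinary matched input/output pair.  */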
3303
3304 /* Sometimes we wish to automatically clobber registers across an asm.
3305 Case in point is when the i386 backend moved from cc0 to a hard reg --
3306 maintaining source-level compatibility means automatically clobbering
3307 the flags register. */
3308 rtx_insn *after_md_seq = NULL;
3309 if (targetm.md_asm_adjust)
3310 after_md_seq = targetm.md_asm_adjust (output_rvec, input_rvec,
3311 constraints, clobber_rvec,
3312 clobbered_regs);
3313
3314 /* Do not allow the hook to change the output and input count,
3315 lest it mess up the operand numbering. */
3316 gcc_assert (output_rvec.length() == noutputs);
3317 gcc_assert (input_rvec.length() == ninputs);
3318 gcc_assert (constraints.length() == noutputs + ninputs);
3319
3320 /* But it certainly can adjust the clobbers. */
3321 unsigned nclobbers = clobber_rvec.length ();
3322
3323 /* Third pass checks for easy conflicts. */
3324 /* ??? Why are we doing this on trees instead of rtx? */
3325
3326 bool clobber_conflict_found = 0;
3327 for (i = 0; i < noutputs; ++i)
3328 if (tree_conflicts_with_clobbers_p (output_tvec[i], &clobbered_regs))
3329 clobber_conflict_found = 1;
3330 for (i = 0; i < ninputs - ninout; ++i)
3331 if (tree_conflicts_with_clobbers_p (input_tvec[i], &clobbered_regs))
3332 clobber_conflict_found = 1;
3333
3334 /* Make vectors for the expression-rtx, constraint strings,
3335 and named operands. */
3336
3337 rtvec argvec = rtvec_alloc (ninputs);
3338 rtvec constraintvec = rtvec_alloc (ninputs);
3339 rtvec labelvec = rtvec_alloc (nlabels);
3340
3341 rtx body = gen_rtx_ASM_OPERANDS ((noutputs == 0 ? VOIDmode
3342 : GET_MODE (output_rvec[0])),
3343 ggc_strdup (gimple_asm_string (stmt)),
3344 "", 0, argvec, constraintvec,
3345 labelvec, locus);
3346 MEM_VOLATILE_P (body) = gimple_asm_volatile_p (stmt);
3347
3348 for (i = 0; i < ninputs; ++i)
3349 {
3350 ASM_OPERANDS_INPUT (body, i) = input_rvec[i];
3351 ASM_OPERANDS_INPUT_CONSTRAINT_EXP (body, i)
3352 = gen_rtx_ASM_INPUT_loc (input_mode[i],
3353 constraints[i + noutputs],
3354 locus);
3355 }
3356
3357 /* Copy labels to the vector. */
3358 rtx_code_label *fallthru_label = NULL;
3359 if (nlabels > 0)
3360 {
3361 basic_block fallthru_bb = NULL;
3362 edge fallthru = find_fallthru_edge (gimple_bb (stmt)->succs);
3363 if (fallthru)
3364 fallthru_bb = fallthru->dest;
3365
3366 for (i = 0; i < nlabels; ++i)
3367 {
3368 tree label = TREE_VALUE (gimple_asm_label_op (stmt, i));
3369 rtx_insn *r;
3370 /* If asm goto has any labels in the fallthru basic block, use
3371 a label that we emit immediately after the asm goto. Expansion
3372 may insert further instructions into the same basic block after
3373 asm goto and if we don't do this, insertion of instructions on
3374 the fallthru edge might misbehave. See PR58670. */
3375 if (fallthru_bb && label_to_block (cfun, label) == fallthru_bb)
3376 {
3377 if (fallthru_label == NULL_RTX)
3378 fallthru_label = gen_label_rtx ();
3379 r = fallthru_label;
3380 }
3381 else
3382 r = label_rtx (label);
3383 ASM_OPERANDS_LABEL (body, i) = gen_rtx_LABEL_REF (Pmode, r);
3384 }
3385 }
3386
3387 /* Now, for each output, construct an rtx
3388 (set OUTPUT (asm_operands INSN OUTPUTCONSTRAINT OUTPUTNUMBER
3389 ARGVEC CONSTRAINTS OPNAMES))
3390 If there is more than one, put them inside a PARALLEL. */
3391
3392 if (nlabels > 0 && nclobbers == 0)
3393 {
3394 gcc_assert (noutputs == 0);
3395 emit_jump_insn (body);
3396 }
3397 else if (noutputs == 0 && nclobbers == 0)
3398 {
3399 /* No output operands: put in a raw ASM_OPERANDS rtx. */
3400 emit_insn (body);
3401 }
3402 else if (noutputs == 1 && nclobbers == 0)
3403 {
3404 ASM_OPERANDS_OUTPUT_CONSTRAINT (body) = constraints[0];
3405 emit_insn (gen_rtx_SET (output_rvec[0], body));
3406 }
3407 else
3408 {
3409 rtx obody = body;
3410 int num = noutputs;
3411
3412 if (num == 0)
3413 num = 1;
3414
3415 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num + nclobbers));
3416
3417 /* For each output operand, store a SET. */
3418 for (i = 0; i < noutputs; ++i)
3419 {
3420 rtx src, o = output_rvec[i];
3421 if (i == 0)
3422 {
3423 ASM_OPERANDS_OUTPUT_CONSTRAINT (obody) = constraints[0];
3424 src = obody;
3425 }
3426 else
3427 {
3428 src = gen_rtx_ASM_OPERANDS (GET_MODE (o),
3429 ASM_OPERANDS_TEMPLATE (obody),
3430 constraints[i], i, argvec,
3431 constraintvec, labelvec, locus);
3432 MEM_VOLATILE_P (src) = gimple_asm_volatile_p (stmt);
3433 }
3434 XVECEXP (body, 0, i) = gen_rtx_SET (o, src);
3435 }
3436
3437 /* If there are no outputs (but there are some clobbers)
3438 store the bare ASM_OPERANDS into the PARALLEL. */
3439 if (i == 0)
3440 XVECEXP (body, 0, i++) = obody;
3441
3442 /* Store (clobber REG) for each clobbered register specified. */
3443 for (unsigned j = 0; j < nclobbers; ++j)
3444 {
3445 rtx clobbered_reg = clobber_rvec[j];
3446
3447 /* Do a sanity check for any overlap between clobbers and the
3448 inputs and outputs that hasn't been handled. Such overlap
3449 should have been detected and reported above. */
3450 if (!clobber_conflict_found && REG_P (clobbered_reg))
3451 {
3452 /* We test the old body (obody) contents to avoid
3453 tripping over the under-construction body. */
3454 for (unsigned k = 0; k < noutputs; ++k)
3455 if (reg_overlap_mentioned_p (clobbered_reg, output_rvec[k]))
3456 internal_error ("%<asm%> clobber conflict with "
3457 "output operand");
3458
3459 for (unsigned k = 0; k < ninputs - ninout; ++k)
3460 if (reg_overlap_mentioned_p (clobbered_reg, input_rvec[k]))
3461 internal_error ("%<asm%> clobber conflict with "
3462 "input operand");
3463 }
3464
3465 XVECEXP (body, 0, i++) = gen_rtx_CLOBBER (VOIDmode, clobbered_reg);
3466 }
3467
3468 if (nlabels > 0)
3469 emit_jump_insn (body);
3470 else
3471 emit_insn (body);
3472 }
3473
3474 generating_concat_p = old_generating_concat_p;
3475
3476 if (fallthru_label)
3477 emit_label (fallthru_label);
3478
3479 if (after_md_seq)
3480 emit_insn (after_md_seq);
3481 if (after_rtl_seq)
3482 emit_insn (after_rtl_seq);
3483
3484 free_temp_slots ();
3485 crtl->has_asm_statement = 1;
3486 }
3487
3488 /* Emit code to jump to the address
3489 specified by the pointer expression EXP. */
3490
3491 static void
3492 expand_computed_goto (tree exp)
3493 {
3494 rtx x = expand_normal (exp);
3495
3496 do_pending_stack_adjust ();
3497 emit_indirect_jump (x);
3498 }
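
/* Computed gotos come from the labels-as-values extension, e.g.

     static void *table[] = { &&l0, &&l1 };
     goto *table[i];

   where EXP above is the address expression being jumped through.  */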
3499
3500 /* Generate RTL code for a `goto' statement with target label LABEL.
3501 LABEL should be a LABEL_DECL tree node that was or will later be
3502 defined with `expand_label'. */
3503
3504 static void
3505 expand_goto (tree label)
3506 {
3507 if (flag_checking)
3508 {
3509 /* Check for a nonlocal goto to a containing function. Should have
3510 gotten translated to __builtin_nonlocal_goto. */
3511 tree context = decl_function_context (label);
3512 gcc_assert (!context || context == current_function_decl);
3513 }
3514
3515 emit_jump (jump_target_rtx (label));
3516 }
3517
3518 /* Output a return with no value. */
3519
3520 static void
3521 expand_null_return_1 (void)
3522 {
3523 clear_pending_stack_adjust ();
3524 do_pending_stack_adjust ();
3525 emit_jump (return_label);
3526 }
3527
3528 /* Generate RTL to return from the current function, with no value.
3529 (That is, we do not do anything about returning any value.) */
3530
3531 void
3532 expand_null_return (void)
3533 {
3534 /* If this function was declared to return a value, but we
3535 didn't, clobber the return registers so that they are not
3536 propagated live to the rest of the function. */
3537 clobber_return_register ();
3538
3539 expand_null_return_1 ();
3540 }
3541
3542 /* Generate RTL to return from the current function, with value VAL. */
3543
3544 static void
3545 expand_value_return (rtx val)
3546 {
3547 /* Copy the value to the return location unless it's already there. */
3548
3549 tree decl = DECL_RESULT (current_function_decl);
3550 rtx return_reg = DECL_RTL (decl);
3551 if (return_reg != val)
3552 {
3553 tree funtype = TREE_TYPE (current_function_decl);
3554 tree type = TREE_TYPE (decl);
3555 int unsignedp = TYPE_UNSIGNED (type);
3556 machine_mode old_mode = DECL_MODE (decl);
3557 machine_mode mode;
3558 if (DECL_BY_REFERENCE (decl))
3559 mode = promote_function_mode (type, old_mode, &unsignedp, funtype, 2);
3560 else
3561 mode = promote_function_mode (type, old_mode, &unsignedp, funtype, 1);
3562
3563 if (mode != old_mode)
3564 val = convert_modes (mode, old_mode, val, unsignedp);
3565
3566 if (GET_CODE (return_reg) == PARALLEL)
3567 emit_group_load (return_reg, val, type, int_size_in_bytes (type));
3568 else
3569 emit_move_insn (return_reg, val);
3570 }
3571
3572 expand_null_return_1 ();
3573 }
3574
3575 /* Generate RTL to evaluate the expression RETVAL and return it
3576 from the current function. */
3577
3578 static void
3579 expand_return (tree retval)
3580 {
3581 rtx result_rtl;
3582 rtx val = 0;
3583 tree retval_rhs;
3584
3585 /* If function wants no value, give it none. */
3586 if (TREE_CODE (TREE_TYPE (TREE_TYPE (current_function_decl))) == VOID_TYPE)
3587 {
3588 expand_normal (retval);
3589 expand_null_return ();
3590 return;
3591 }
3592
3593 if (retval == error_mark_node)
3594 {
3595 /* Treat this like a return of no value from a function that
3596 returns a value. */
3597 expand_null_return ();
3598 return;
3599 }
3600 else if ((TREE_CODE (retval) == MODIFY_EXPR
3601 || TREE_CODE (retval) == INIT_EXPR)
3602 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
3603 retval_rhs = TREE_OPERAND (retval, 1);
3604 else
3605 retval_rhs = retval;
3606
3607 result_rtl = DECL_RTL (DECL_RESULT (current_function_decl));
3608
3609 /* If we are returning the RESULT_DECL, then the value has already
3610 been stored into it, so we don't have to do anything special. */
3611 if (TREE_CODE (retval_rhs) == RESULT_DECL)
3612 expand_value_return (result_rtl);
3613
3614 /* If the result is an aggregate that is being returned in one (or more)
3615 registers, load the registers here. */
3616
3617 else if (retval_rhs != 0
3618 && TYPE_MODE (TREE_TYPE (retval_rhs)) == BLKmode
3619 && REG_P (result_rtl))
3620 {
3621 val = copy_blkmode_to_reg (GET_MODE (result_rtl), retval_rhs);
3622 if (val)
3623 {
3624 /* Use the mode of the result value on the return register. */
3625 PUT_MODE (result_rtl, GET_MODE (val));
3626 expand_value_return (val);
3627 }
3628 else
3629 expand_null_return ();
3630 }
3631 else if (retval_rhs != 0
3632 && !VOID_TYPE_P (TREE_TYPE (retval_rhs))
3633 && (REG_P (result_rtl)
3634 || (GET_CODE (result_rtl) == PARALLEL)))
3635 {
3636 /* Compute the return value into a temporary (usually a pseudo reg). */
3637 val
3638 = assign_temp (TREE_TYPE (DECL_RESULT (current_function_decl)), 0, 1);
3639 val = expand_expr (retval_rhs, val, GET_MODE (val), EXPAND_NORMAL);
3640 val = force_not_mem (val);
3641 expand_value_return (val);
3642 }
3643 else
3644 {
3645 /* No hard reg used; calculate value into hard return reg. */
3646 expand_expr (retval, const0_rtx, VOIDmode, EXPAND_NORMAL);
3647 expand_value_return (result_rtl);
3648 }
3649 }
3650
3651 /* Expand a clobber of LHS. If LHS is stored in a multi-part
3652 register, tell the rtl optimizers that its value is no longer
3653 needed. */
3654
3655 static void
3656 expand_clobber (tree lhs)
3657 {
3658 if (DECL_P (lhs))
3659 {
3660 rtx decl_rtl = DECL_RTL_IF_SET (lhs);
3661 if (decl_rtl && REG_P (decl_rtl))
3662 {
3663 machine_mode decl_mode = GET_MODE (decl_rtl);
3664 if (maybe_gt (GET_MODE_SIZE (decl_mode),
3665 REGMODE_NATURAL_SIZE (decl_mode)))
3666 emit_clobber (decl_rtl);
3667 }
3668 }
3669 }
3670
3671 /* A subroutine of expand_gimple_stmt, expanding one gimple statement
3672 STMT that doesn't require special handling for outgoing edges. That
3673 is, no tailcalls and no GIMPLE_COND. */
3674
3675 static void
3676 expand_gimple_stmt_1 (gimple *stmt)
3677 {
3678 tree op0;
3679
3680 set_curr_insn_location (gimple_location (stmt));
3681
3682 switch (gimple_code (stmt))
3683 {
3684 case GIMPLE_GOTO:
3685 op0 = gimple_goto_dest (stmt);
3686 if (TREE_CODE (op0) == LABEL_DECL)
3687 expand_goto (op0);
3688 else
3689 expand_computed_goto (op0);
3690 break;
3691 case GIMPLE_LABEL:
3692 expand_label (gimple_label_label (as_a <glabel *> (stmt)));
3693 break;
3694 case GIMPLE_NOP:
3695 case GIMPLE_PREDICT:
3696 break;
3697 case GIMPLE_SWITCH:
3698 {
3699 gswitch *swtch = as_a <gswitch *> (stmt);
3700 if (gimple_switch_num_labels (swtch) == 1)
3701 expand_goto (CASE_LABEL (gimple_switch_default_label (swtch)));
3702 else
3703 expand_case (swtch);
3704 }
3705 break;
3706 case GIMPLE_ASM:
3707 expand_asm_stmt (as_a <gasm *> (stmt));
3708 break;
3709 case GIMPLE_CALL:
3710 expand_call_stmt (as_a <gcall *> (stmt));
3711 break;
3712
3713 case GIMPLE_RETURN:
3714 {
3715 op0 = gimple_return_retval (as_a <greturn *> (stmt));
3716
3717 /* If a return doesn't have a location, it very likely represents
3718 multiple user returns, so we cannot let it inherit the location
3719 of the last statement of the previous basic block in RTL. */
3720 if (!gimple_has_location (stmt))
3721 set_curr_insn_location (cfun->function_end_locus);
3722
3723 if (op0 && op0 != error_mark_node)
3724 {
3725 tree result = DECL_RESULT (current_function_decl);
3726
3727 /* If we are not returning the current function's RESULT_DECL,
3728 build an assignment to it. */
3729 if (op0 != result)
3730 {
3731 /* I believe that a function's RESULT_DECL is unique. */
3732 gcc_assert (TREE_CODE (op0) != RESULT_DECL);
3733
3734 /* ??? We'd like to use simply expand_assignment here,
3735 but this fails if the value is of BLKmode but the return
3736 decl is a register. expand_return has special handling
3737 for this combination, which eventually should move
3738 to common code. See comments there. Until then, let's
3739 build a modify expression :-/ */
3740 op0 = build2 (MODIFY_EXPR, TREE_TYPE (result),
3741 result, op0);
3742 }
3743 }
3744
3745 if (!op0)
3746 expand_null_return ();
3747 else
3748 expand_return (op0);
3749 }
3750 break;
3751
3752 case GIMPLE_ASSIGN:
3753 {
3754 gassign *assign_stmt = as_a <gassign *> (stmt);
3755 tree lhs = gimple_assign_lhs (assign_stmt);
3756
3757 /* Tree expand used to fiddle with |= and &= of two bitfield
3758 COMPONENT_REFs here. This can't happen with gimple; the LHS
3759 of binary assigns must be a gimple reg. */
3760
3761 if (TREE_CODE (lhs) != SSA_NAME
3762 || get_gimple_rhs_class (gimple_expr_code (stmt))
3763 == GIMPLE_SINGLE_RHS)
3764 {
3765 tree rhs = gimple_assign_rhs1 (assign_stmt);
3766 gcc_assert (get_gimple_rhs_class (gimple_expr_code (stmt))
3767 == GIMPLE_SINGLE_RHS);
3768 if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (rhs)
3769 /* Do not put locations on possibly shared trees. */
3770 && !is_gimple_min_invariant (rhs))
3771 SET_EXPR_LOCATION (rhs, gimple_location (stmt));
3772 if (TREE_CLOBBER_P (rhs))
3773 /* This is a clobber marking that this LHS is going
3774 out of scope. */
3775 expand_clobber (lhs);
3776 else
3777 expand_assignment (lhs, rhs,
3778 gimple_assign_nontemporal_move_p (
3779 assign_stmt));
3780 }
3781 else
3782 {
3783 rtx target, temp;
3784 bool nontemporal = gimple_assign_nontemporal_move_p (assign_stmt);
3785 struct separate_ops ops;
3786 bool promoted = false;
3787
3788 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3789 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
3790 promoted = true;
3791
3792 ops.code = gimple_assign_rhs_code (assign_stmt);
3793 ops.type = TREE_TYPE (lhs);
3794 switch (get_gimple_rhs_class (ops.code))
3795 {
3796 case GIMPLE_TERNARY_RHS:
3797 ops.op2 = gimple_assign_rhs3 (assign_stmt);
3798 /* Fallthru */
3799 case GIMPLE_BINARY_RHS:
3800 ops.op1 = gimple_assign_rhs2 (assign_stmt);
3801 /* Fallthru */
3802 case GIMPLE_UNARY_RHS:
3803 ops.op0 = gimple_assign_rhs1 (assign_stmt);
3804 break;
3805 default:
3806 gcc_unreachable ();
3807 }
3808 ops.location = gimple_location (stmt);
3809
3810 /* If we want to use a nontemporal store, force the value into a
3811 register first. If we store into a promoted register, don't
3812 expand directly to the target. */
3813 temp = nontemporal || promoted ? NULL_RTX : target;
3814 temp = expand_expr_real_2 (&ops, temp, GET_MODE (target),
3815 EXPAND_NORMAL);
3816
3817 if (temp == target)
3818 ;
3819 else if (promoted)
3820 {
3821 int unsignedp = SUBREG_PROMOTED_SIGN (target);
3822 /* If TEMP is a VOIDmode constant, use convert_modes to make
3823 sure that we properly convert it. */
3824 if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
3825 {
3826 temp = convert_modes (GET_MODE (target),
3827 TYPE_MODE (ops.type),
3828 temp, unsignedp);
3829 temp = convert_modes (GET_MODE (SUBREG_REG (target)),
3830 GET_MODE (target), temp, unsignedp);
3831 }
3832
3833 convert_move (SUBREG_REG (target), temp, unsignedp);
3834 }
3835 else if (nontemporal && emit_storent_insn (target, temp))
3836 ;
3837 else
3838 {
3839 temp = force_operand (temp, target);
3840 if (temp != target)
3841 emit_move_insn (target, temp);
3842 }
3843 }
3844 }
3845 break;
3846
3847 default:
3848 gcc_unreachable ();
3849 }
3850 }
3851
3852 /* Expand one gimple statement STMT and return the last RTL instruction
3853 before any of the newly generated ones.
3854
3855 In addition to generating the necessary RTL instructions this also
3856 sets REG_EH_REGION notes if necessary and sets the current source
3857 location for diagnostics. */
3858
3859 static rtx_insn *
3860 expand_gimple_stmt (gimple *stmt)
3861 {
3862 location_t saved_location = input_location;
3863 rtx_insn *last = get_last_insn ();
3864 int lp_nr;
3865
3866 gcc_assert (cfun);
3867
3868 /* We need to save and restore the current source location so that errors
3869 discovered during expansion are emitted with the right location. But
3870 it would be better if the diagnostic routines used the source location
3871 embedded in the tree nodes rather than globals. */
3872 if (gimple_has_location (stmt))
3873 input_location = gimple_location (stmt);
3874
3875 expand_gimple_stmt_1 (stmt);
3876
3877 /* Free any temporaries used to evaluate this statement. */
3878 free_temp_slots ();
3879
3880 input_location = saved_location;
3881
3882 /* Mark all insns that may trap. */
3883 lp_nr = lookup_stmt_eh_lp (stmt);
3884 if (lp_nr)
3885 {
3886 rtx_insn *insn;
3887 for (insn = next_real_insn (last); insn;
3888 insn = next_real_insn (insn))
3889 {
3890 if (! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
3891 /* If we want exceptions for non-call insns, any
3892 may_trap_p instruction may throw. */
3893 && GET_CODE (PATTERN (insn)) != CLOBBER
3894 && GET_CODE (PATTERN (insn)) != CLOBBER_HIGH
3895 && GET_CODE (PATTERN (insn)) != USE
3896 && insn_could_throw_p (insn))
3897 make_reg_eh_region_note (insn, 0, lp_nr);
3898 }
3899 }
3900
3901 return last;
3902 }
3903
3904 /* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_CALL
3905 that has CALL_EXPR_TAILCALL set. Returns non-null if we actually
3906 generated a tail call (something that might be denied by the ABI
3907 rules governing the call; see calls.c).
3908
3909 Sets CAN_FALLTHRU if we generated a *conditional* tail call, and
3910 can still reach the rest of BB. The case here is __builtin_sqrt,
3911 where the NaN result goes through the external function (with a
3912 tailcall) and the normal result happens via a sqrt instruction. */
3913
3914 static basic_block
3915 expand_gimple_tailcall (basic_block bb, gcall *stmt, bool *can_fallthru)
3916 {
3917 rtx_insn *last2, *last;
3918 edge e;
3919 edge_iterator ei;
3920 profile_probability probability;
3921
3922 last2 = last = expand_gimple_stmt (stmt);
3923
3924 for (last = NEXT_INSN (last); last; last = NEXT_INSN (last))
3925 if (CALL_P (last) && SIBLING_CALL_P (last))
3926 goto found;
3927
3928 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
3929
3930 *can_fallthru = true;
3931 return NULL;
3932
3933 found:
3934 /* ??? Wouldn't it be better to just reset any pending stack adjust?
3935 Any instructions emitted here are about to be deleted. */
3936 do_pending_stack_adjust ();
3937
3938 /* Remove any non-eh, non-abnormal edges that don't go to exit. */
3939 /* ??? I.e. the fallthrough edge. HOWEVER! If there were to be
3940 EH or abnormal edges, we shouldn't have created a tail call in
3941 the first place. So it seems to me we should just be removing
3942 all edges here, or redirecting the existing fallthru edge to
3943 the exit block. */
3944
3945 probability = profile_probability::never ();
3946
3947 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3948 {
3949 if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
3950 {
3951 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3952 e->dest->count -= e->count ();
3953 probability += e->probability;
3954 remove_edge (e);
3955 }
3956 else
3957 ei_next (&ei);
3958 }
3959
3960 /* This is somewhat ugly: the call_expr expander often emits instructions
3961 after the sibcall (to perform the function return). These confuse the
3962 find_many_sub_basic_blocks code, so we need to get rid of them. */
3963 last = NEXT_INSN (last);
3964 gcc_assert (BARRIER_P (last));
3965
3966 *can_fallthru = false;
3967 while (NEXT_INSN (last))
3968 {
3969 /* For instance, an sqrt builtin expander expands an if with the
3970 sibcall in the then arm and a label for the else arm. */
3971 if (LABEL_P (NEXT_INSN (last)))
3972 {
3973 *can_fallthru = true;
3974 break;
3975 }
3976 delete_insn (NEXT_INSN (last));
3977 }
3978
3979 e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_ABNORMAL
3980 | EDGE_SIBCALL);
3981 e->probability = probability;
3982 BB_END (bb) = last;
3983 update_bb_for_insn (bb);
3984
3985 if (NEXT_INSN (last))
3986 {
3987 bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);
3988
3989 last = BB_END (bb);
3990 if (BARRIER_P (last))
3991 BB_END (bb) = PREV_INSN (last);
3992 }
3993
3994 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
3995
3996 return bb;
3997 }
3998
3999 /* Return the difference between the floor and the truncated result of
4000 a signed division by OP1 with remainder MOD. */
4001 static rtx
4002 floor_sdiv_adjust (machine_mode mode, rtx mod, rtx op1)
4003 {
4004 /* (mod != 0 ? (op1 / mod < 0 ? -1 : 0) : 0) */
4005 return gen_rtx_IF_THEN_ELSE
4006 (mode, gen_rtx_NE (BImode, mod, const0_rtx),
4007 gen_rtx_IF_THEN_ELSE
4008 (mode, gen_rtx_LT (BImode,
4009 gen_rtx_DIV (mode, op1, mod),
4010 const0_rtx),
4011 constm1_rtx, const0_rtx),
4012 const0_rtx);
4013 }
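
/* Illustrative sketch only, not used by this pass: floor_sdiv_adjust above
   encodes the identity floor (a / b) == trunc (a / b) - 1 exactly when the
   remainder is nonzero and the operands have opposite signs, which is what
   the op1 / mod < 0 test detects (|mod| < |op1|, so that quotient's sign is
   the product of the operands' signs).  A hypothetical plain-C rendering,
   assuming int operands and b != 0:  */

static int
floor_div_sketch (int a, int b)
{
  int div = a / b;		/* truncated quotient */
  int mod = a % b;		/* truncated remainder, same sign as a */
  /* -1 when the truncated result overshot the floor, 0 otherwise.  */
  int adj = (mod != 0 && b / mod < 0) ? -1 : 0;
  return div + adj;		/* e.g. floor_div_sketch (-7, 2) == -4 */
}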
4014
4015 /* Return the difference between the ceil and the truncated result of
4016 a signed division by OP1 with remainder MOD. */
4017 static rtx
4018 ceil_sdiv_adjust (machine_mode mode, rtx mod, rtx op1)
4019 {
4020 /* (mod != 0 ? (op1 / mod > 0 ? 1 : 0) : 0) */
4021 return gen_rtx_IF_THEN_ELSE
4022 (mode, gen_rtx_NE (BImode, mod, const0_rtx),
4023 gen_rtx_IF_THEN_ELSE
4024 (mode, gen_rtx_GT (BImode,
4025 gen_rtx_DIV (mode, op1, mod),
4026 const0_rtx),
4027 const1_rtx, const0_rtx),
4028 const0_rtx);
4029 }
4030
4031 /* Return the difference between the ceil and the truncated result of
4032 an unsigned division by OP1 with remainder MOD. */
4033 static rtx
4034 ceil_udiv_adjust (machine_mode mode, rtx mod, rtx op1 ATTRIBUTE_UNUSED)
4035 {
4036 /* (mod != 0 ? 1 : 0) */
4037 return gen_rtx_IF_THEN_ELSE
4038 (mode, gen_rtx_NE (BImode, mod, const0_rtx),
4039 const1_rtx, const0_rtx);
4040 }
4041
4042 /* Return the difference between the rounded and the truncated result
4043 of a signed division by OP1 with remainder MOD. Halfway cases are
4044 rounded away from zero, rather than to the nearest even number. */
4045 static rtx
4046 round_sdiv_adjust (machine_mode mode, rtx mod, rtx op1)
4047 {
4048 /* (abs (mod) >= abs (op1) - abs (mod)
4049 ? (op1 / mod > 0 ? 1 : -1)
4050 : 0) */
4051 return gen_rtx_IF_THEN_ELSE
4052 (mode, gen_rtx_GE (BImode, gen_rtx_ABS (mode, mod),
4053 gen_rtx_MINUS (mode,
4054 gen_rtx_ABS (mode, op1),
4055 gen_rtx_ABS (mode, mod))),
4056 gen_rtx_IF_THEN_ELSE
4057 (mode, gen_rtx_GT (BImode,
4058 gen_rtx_DIV (mode, op1, mod),
4059 const0_rtx),
4060 const1_rtx, constm1_rtx),
4061 const0_rtx);
4062 }
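
/* Illustrative sketch only, not used by this pass: the signed rounded
   result above differs from the truncated one by +1 or -1 (following the
   quotient's sign) exactly when |mod| >= |op1| - |mod|, so halfway cases
   move away from zero rather than to the nearest even value.  A
   hypothetical plain-C rendering, assuming b != 0 and no overflow when
   negating:  */

static int
round_sdiv_sketch (int a, int b)
{
  int div = a / b;			/* truncated quotient */
  int mod = a % b;			/* truncated remainder */
  int abs_mod = mod < 0 ? -mod : mod;
  int abs_b = b < 0 ? -b : b;
  if (mod != 0 && abs_mod >= abs_b - abs_mod)
    /* e.g. round_sdiv_sketch (-5, 2) == -3, not -2.  */
    return div + (((a < 0) != (b < 0)) ? -1 : 1);
  return div;
}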
4063
4064 /* Return the difference between the rounded and the truncated result
4065 of an unsigned division by OP1 with remainder MOD. Halfway cases
4066 are rounded away from zero, rather than to the nearest even
4067 number. */
4068 static rtx
4069 round_udiv_adjust (machine_mode mode, rtx mod, rtx op1)
4070 {
4071 /* (mod >= op1 - mod ? 1 : 0) */
4072 return gen_rtx_IF_THEN_ELSE
4073 (mode, gen_rtx_GE (BImode, mod,
4074 gen_rtx_MINUS (mode, op1, mod)),
4075 const1_rtx, const0_rtx);
4076 }
4077
4078 /* Convert X to MODE, that must be Pmode or ptr_mode, without emitting
4079 any rtl. */
4080
4081 static rtx
4082 convert_debug_memory_address (scalar_int_mode mode, rtx x,
4083 addr_space_t as)
4084 {
4085 #ifndef POINTERS_EXTEND_UNSIGNED
4086 gcc_assert (mode == Pmode
4087 || mode == targetm.addr_space.address_mode (as));
4088 gcc_assert (GET_MODE (x) == mode || GET_MODE (x) == VOIDmode);
4089 #else
4090 rtx temp;
4091
4092 gcc_assert (targetm.addr_space.valid_pointer_mode (mode, as));
4093
4094 if (GET_MODE (x) == mode || GET_MODE (x) == VOIDmode)
4095 return x;
4096
4097 /* X must have some form of address mode already. */
4098 scalar_int_mode xmode = as_a <scalar_int_mode> (GET_MODE (x));
4099 if (GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (xmode))
4100 x = lowpart_subreg (mode, x, xmode);
4101 else if (POINTERS_EXTEND_UNSIGNED > 0)
4102 x = gen_rtx_ZERO_EXTEND (mode, x);
4103 else if (!POINTERS_EXTEND_UNSIGNED)
4104 x = gen_rtx_SIGN_EXTEND (mode, x);
4105 else
4106 {
4107 switch (GET_CODE (x))
4108 {
4109 case SUBREG:
4110 if ((SUBREG_PROMOTED_VAR_P (x)
4111 || (REG_P (SUBREG_REG (x)) && REG_POINTER (SUBREG_REG (x)))
4112 || (GET_CODE (SUBREG_REG (x)) == PLUS
4113 && REG_P (XEXP (SUBREG_REG (x), 0))
4114 && REG_POINTER (XEXP (SUBREG_REG (x), 0))
4115 && CONST_INT_P (XEXP (SUBREG_REG (x), 1))))
4116 && GET_MODE (SUBREG_REG (x)) == mode)
4117 return SUBREG_REG (x);
4118 break;
4119 case LABEL_REF:
4120 temp = gen_rtx_LABEL_REF (mode, label_ref_label (x));
4121 LABEL_REF_NONLOCAL_P (temp) = LABEL_REF_NONLOCAL_P (x);
4122 return temp;
4123 case SYMBOL_REF:
4124 temp = shallow_copy_rtx (x);
4125 PUT_MODE (temp, mode);
4126 return temp;
4127 case CONST:
4128 temp = convert_debug_memory_address (mode, XEXP (x, 0), as);
4129 if (temp)
4130 temp = gen_rtx_CONST (mode, temp);
4131 return temp;
4132 case PLUS:
4133 case MINUS:
4134 if (CONST_INT_P (XEXP (x, 1)))
4135 {
4136 temp = convert_debug_memory_address (mode, XEXP (x, 0), as);
4137 if (temp)
4138 return gen_rtx_fmt_ee (GET_CODE (x), mode, temp, XEXP (x, 1));
4139 }
4140 break;
4141 default:
4142 break;
4143 }
4144 /* Don't know how to express ptr_extend as an operation in debug info. */
4145 return NULL;
4146 }
4147 #endif /* POINTERS_EXTEND_UNSIGNED */
4148
4149 return x;
4150 }
4151
4152 /* Map from SSA_NAMEs to corresponding DEBUG_EXPR_DECLs created
4153 by avoid_deep_ter_for_debug. */
4154
4155 static hash_map<tree, tree> *deep_ter_debug_map;
4156
4157 /* Split overly deep TER chains for debug stmts using debug temporaries. */
4158
4159 static void
4160 avoid_deep_ter_for_debug (gimple *stmt, int depth)
4161 {
4162 use_operand_p use_p;
4163 ssa_op_iter iter;
4164 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
4165 {
4166 tree use = USE_FROM_PTR (use_p);
4167 if (TREE_CODE (use) != SSA_NAME || SSA_NAME_IS_DEFAULT_DEF (use))
4168 continue;
4169 gimple *g = get_gimple_for_ssa_name (use);
4170 if (g == NULL)
4171 continue;
4172 if (depth > 6 && !stmt_ends_bb_p (g))
4173 {
4174 if (deep_ter_debug_map == NULL)
4175 deep_ter_debug_map = new hash_map<tree, tree>;
4176
4177 tree &vexpr = deep_ter_debug_map->get_or_insert (use);
4178 if (vexpr != NULL)
4179 continue;
4180 vexpr = make_node (DEBUG_EXPR_DECL);
4181 gimple *def_temp = gimple_build_debug_bind (vexpr, use, g);
4182 DECL_ARTIFICIAL (vexpr) = 1;
4183 TREE_TYPE (vexpr) = TREE_TYPE (use);
4184 SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (use)));
4185 gimple_stmt_iterator gsi = gsi_for_stmt (g);
4186 gsi_insert_after (&gsi, def_temp, GSI_NEW_STMT);
4187 avoid_deep_ter_for_debug (def_temp, 0);
4188 }
4189 else
4190 avoid_deep_ter_for_debug (g, depth + 1);
4191 }
4192 }
4193
4194 /* Return an RTX equivalent to the value of the parameter DECL. */
4195
4196 static rtx
4197 expand_debug_parm_decl (tree decl)
4198 {
4199 rtx incoming = DECL_INCOMING_RTL (decl);
4200
4201 if (incoming
4202 && GET_MODE (incoming) != BLKmode
4203 && ((REG_P (incoming) && HARD_REGISTER_P (incoming))
4204 || (MEM_P (incoming)
4205 && REG_P (XEXP (incoming, 0))
4206 && HARD_REGISTER_P (XEXP (incoming, 0)))))
4207 {
4208 rtx rtl = gen_rtx_ENTRY_VALUE (GET_MODE (incoming));
4209
4210 #ifdef HAVE_window_save
4211 /* DECL_INCOMING_RTL uses the INCOMING_REGNO of parameter registers.
4212 If the target machine has an explicit window save instruction, the
4213 actual entry value is the corresponding OUTGOING_REGNO instead. */
4214 if (REG_P (incoming)
4215 && OUTGOING_REGNO (REGNO (incoming)) != REGNO (incoming))
4216 incoming
4217 = gen_rtx_REG_offset (incoming, GET_MODE (incoming),
4218 OUTGOING_REGNO (REGNO (incoming)), 0);
4219 else if (MEM_P (incoming))
4220 {
4221 rtx reg = XEXP (incoming, 0);
4222 if (OUTGOING_REGNO (REGNO (reg)) != REGNO (reg))
4223 {
4224 reg = gen_raw_REG (GET_MODE (reg), OUTGOING_REGNO (REGNO (reg)));
4225 incoming = replace_equiv_address_nv (incoming, reg);
4226 }
4227 else
4228 incoming = copy_rtx (incoming);
4229 }
4230 #endif
4231
4232 ENTRY_VALUE_EXP (rtl) = incoming;
4233 return rtl;
4234 }
4235
4236 if (incoming
4237 && GET_MODE (incoming) != BLKmode
4238 && !TREE_ADDRESSABLE (decl)
4239 && MEM_P (incoming)
4240 && (XEXP (incoming, 0) == virtual_incoming_args_rtx
4241 || (GET_CODE (XEXP (incoming, 0)) == PLUS
4242 && XEXP (XEXP (incoming, 0), 0) == virtual_incoming_args_rtx
4243 && CONST_INT_P (XEXP (XEXP (incoming, 0), 1)))))
4244 return copy_rtx (incoming);
4245
4246 return NULL_RTX;
4247 }
4248
4249 /* Return an RTX equivalent to the value of the tree expression EXP. */
4250
4251 static rtx
4252 expand_debug_expr (tree exp)
4253 {
4254 rtx op0 = NULL_RTX, op1 = NULL_RTX, op2 = NULL_RTX;
4255 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
4256 machine_mode inner_mode = VOIDmode;
4257 int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
4258 addr_space_t as;
4259 scalar_int_mode op0_mode, op1_mode, addr_mode;
4260
4261 switch (TREE_CODE_CLASS (TREE_CODE (exp)))
4262 {
4263 case tcc_expression:
4264 switch (TREE_CODE (exp))
4265 {
4266 case COND_EXPR:
4267 case DOT_PROD_EXPR:
4268 case SAD_EXPR:
4269 case WIDEN_MULT_PLUS_EXPR:
4270 case WIDEN_MULT_MINUS_EXPR:
4271 goto ternary;
4272
4273 case TRUTH_ANDIF_EXPR:
4274 case TRUTH_ORIF_EXPR:
4275 case TRUTH_AND_EXPR:
4276 case TRUTH_OR_EXPR:
4277 case TRUTH_XOR_EXPR:
4278 goto binary;
4279
4280 case TRUTH_NOT_EXPR:
4281 goto unary;
4282
4283 default:
4284 break;
4285 }
4286 break;
4287
4288 ternary:
4289 op2 = expand_debug_expr (TREE_OPERAND (exp, 2));
4290 if (!op2)
4291 return NULL_RTX;
4292 /* Fall through. */
4293
4294 binary:
4295 case tcc_binary:
4296 if (mode == BLKmode)
4297 return NULL_RTX;
4298 op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
4299 if (!op1)
4300 return NULL_RTX;
4301 switch (TREE_CODE (exp))
4302 {
4303 case LSHIFT_EXPR:
4304 case RSHIFT_EXPR:
4305 case LROTATE_EXPR:
4306 case RROTATE_EXPR:
4307 case WIDEN_LSHIFT_EXPR:
4308 /* Ensure the second operand isn't wider than the first one. */
4309 inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1)));
4310 if (is_a <scalar_int_mode> (inner_mode, &op1_mode)
4311 && (GET_MODE_UNIT_PRECISION (mode)
4312 < GET_MODE_PRECISION (op1_mode)))
4313 op1 = lowpart_subreg (GET_MODE_INNER (mode), op1, op1_mode);
4314 break;
4315 default:
4316 break;
4317 }
4318 /* Fall through. */
4319
4320 unary:
4321 case tcc_unary:
4322 if (mode == BLKmode)
4323 return NULL_RTX;
4324 inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
4325 op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
4326 if (!op0)
4327 return NULL_RTX;
4328 break;
4329
4330 case tcc_comparison:
4331 unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)));
4332 goto binary;
4333
4334 case tcc_type:
4335 case tcc_statement:
4336 gcc_unreachable ();
4337
4338 case tcc_constant:
4339 case tcc_exceptional:
4340 case tcc_declaration:
4341 case tcc_reference:
4342 case tcc_vl_exp:
4343 break;
4344 }
4345
4346 switch (TREE_CODE (exp))
4347 {
4348 case STRING_CST:
4349 if (!lookup_constant_def (exp))
4350 {
4351 if (strlen (TREE_STRING_POINTER (exp)) + 1
4352 != (size_t) TREE_STRING_LENGTH (exp))
4353 return NULL_RTX;
4354 op0 = gen_rtx_CONST_STRING (Pmode, TREE_STRING_POINTER (exp));
4355 op0 = gen_rtx_MEM (BLKmode, op0);
4356 set_mem_attributes (op0, exp, 0);
4357 return op0;
4358 }
4359 /* Fall through. */
4360
4361 case INTEGER_CST:
4362 case REAL_CST:
4363 case FIXED_CST:
4364 op0 = expand_expr (exp, NULL_RTX, mode, EXPAND_INITIALIZER);
4365 return op0;
4366
4367 case POLY_INT_CST:
4368 return immed_wide_int_const (poly_int_cst_value (exp), mode);
4369
4370 case COMPLEX_CST:
4371 gcc_assert (COMPLEX_MODE_P (mode));
4372 op0 = expand_debug_expr (TREE_REALPART (exp));
4373 op1 = expand_debug_expr (TREE_IMAGPART (exp));
4374 return gen_rtx_CONCAT (mode, op0, op1);
4375
4376 case DEBUG_EXPR_DECL:
4377 op0 = DECL_RTL_IF_SET (exp);
4378
4379 if (op0)
4380 return op0;
4381
4382 op0 = gen_rtx_DEBUG_EXPR (mode);
4383 DEBUG_EXPR_TREE_DECL (op0) = exp;
4384 SET_DECL_RTL (exp, op0);
4385
4386 return op0;
4387
4388 case VAR_DECL:
4389 case PARM_DECL:
4390 case FUNCTION_DECL:
4391 case LABEL_DECL:
4392 case CONST_DECL:
4393 case RESULT_DECL:
4394 op0 = DECL_RTL_IF_SET (exp);
4395
4396 /* This decl was probably optimized away. */
4397 if (!op0
4398 /* At least label RTXen are sometimes replaced by
4399 NOTE_INSN_DELETED_LABEL. Any notes here are not
4400 handled by copy_rtx. */
4401 || NOTE_P (op0))
4402 {
4403 if (!VAR_P (exp)
4404 || DECL_EXTERNAL (exp)
4405 || !TREE_STATIC (exp)
4406 || !DECL_NAME (exp)
4407 || DECL_HARD_REGISTER (exp)
4408 || DECL_IN_CONSTANT_POOL (exp)
4409 || mode == VOIDmode)
4410 return NULL;
4411
4412 op0 = make_decl_rtl_for_debug (exp);
4413 if (!MEM_P (op0)
4414 || GET_CODE (XEXP (op0, 0)) != SYMBOL_REF
4415 || SYMBOL_REF_DECL (XEXP (op0, 0)) != exp)
4416 return NULL;
4417 }
4418 else
4419 op0 = copy_rtx (op0);
4420
4421 if (GET_MODE (op0) == BLKmode
4422 /* If op0 is not BLKmode, but mode is, adjust_mode
4423 below would ICE. While it is likely a FE bug,
4424 try to be robust here. See PR43166. */
4425 || mode == BLKmode
4426 || (mode == VOIDmode && GET_MODE (op0) != VOIDmode))
4427 {
4428 gcc_assert (MEM_P (op0));
4429 op0 = adjust_address_nv (op0, mode, 0);
4430 return op0;
4431 }
4432
4433 /* Fall through. */
4434
4435 adjust_mode:
4436 case PAREN_EXPR:
4437 CASE_CONVERT:
4438 {
4439 inner_mode = GET_MODE (op0);
4440
4441 if (mode == inner_mode)
4442 return op0;
4443
4444 if (inner_mode == VOIDmode)
4445 {
4446 if (TREE_CODE (exp) == SSA_NAME)
4447 inner_mode = TYPE_MODE (TREE_TYPE (exp));
4448 else
4449 inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
4450 if (mode == inner_mode)
4451 return op0;
4452 }
4453
4454 if (FLOAT_MODE_P (mode) && FLOAT_MODE_P (inner_mode))
4455 {
4456 if (GET_MODE_UNIT_BITSIZE (mode)
4457 == GET_MODE_UNIT_BITSIZE (inner_mode))
4458 op0 = simplify_gen_subreg (mode, op0, inner_mode, 0);
4459 else if (GET_MODE_UNIT_BITSIZE (mode)
4460 < GET_MODE_UNIT_BITSIZE (inner_mode))
4461 op0 = simplify_gen_unary (FLOAT_TRUNCATE, mode, op0, inner_mode);
4462 else
4463 op0 = simplify_gen_unary (FLOAT_EXTEND, mode, op0, inner_mode);
4464 }
4465 else if (FLOAT_MODE_P (mode))
4466 {
4467 gcc_assert (TREE_CODE (exp) != SSA_NAME);
4468 if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))))
4469 op0 = simplify_gen_unary (UNSIGNED_FLOAT, mode, op0, inner_mode);
4470 else
4471 op0 = simplify_gen_unary (FLOAT, mode, op0, inner_mode);
4472 }
4473 else if (FLOAT_MODE_P (inner_mode))
4474 {
4475 if (unsignedp)
4476 op0 = simplify_gen_unary (UNSIGNED_FIX, mode, op0, inner_mode);
4477 else
4478 op0 = simplify_gen_unary (FIX, mode, op0, inner_mode);
4479 }
4480 else if (GET_MODE_UNIT_PRECISION (mode)
4481 == GET_MODE_UNIT_PRECISION (inner_mode))
4482 op0 = lowpart_subreg (mode, op0, inner_mode);
4483 else if (GET_MODE_UNIT_PRECISION (mode)
4484 < GET_MODE_UNIT_PRECISION (inner_mode))
4485 op0 = simplify_gen_unary (TRUNCATE, mode, op0, inner_mode);
4486 else if (UNARY_CLASS_P (exp)
4487 ? TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))
4488 : unsignedp)
4489 op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode);
4490 else
4491 op0 = simplify_gen_unary (SIGN_EXTEND, mode, op0, inner_mode);
4492
4493 return op0;
4494 }
4495
4496 case MEM_REF:
4497 if (!is_gimple_mem_ref_addr (TREE_OPERAND (exp, 0)))
4498 {
4499 tree newexp = fold_binary (MEM_REF, TREE_TYPE (exp),
4500 TREE_OPERAND (exp, 0),
4501 TREE_OPERAND (exp, 1));
4502 if (newexp)
4503 return expand_debug_expr (newexp);
4504 }
4505 /* FALLTHROUGH */
4506 case INDIRECT_REF:
4507 inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
4508 op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
4509 if (!op0)
4510 return NULL;
4511
4512 if (TREE_CODE (exp) == MEM_REF)
4513 {
4514 if (GET_CODE (op0) == DEBUG_IMPLICIT_PTR
4515 || (GET_CODE (op0) == PLUS
4516 && GET_CODE (XEXP (op0, 0)) == DEBUG_IMPLICIT_PTR))
4517 /* (mem (debug_implicit_ptr)) might confuse aliasing.
4518 Instead just use get_inner_reference. */
4519 goto component_ref;
4520
4521 op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
4522 poly_int64 offset;
4523 if (!op1 || !poly_int_rtx_p (op1, &offset))
4524 return NULL;
4525
4526 op0 = plus_constant (inner_mode, op0, offset);
4527 }
4528
4529 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
4530
4531 op0 = convert_debug_memory_address (targetm.addr_space.address_mode (as),
4532 op0, as);
4533 if (op0 == NULL_RTX)
4534 return NULL;
4535
4536 op0 = gen_rtx_MEM (mode, op0);
4537 set_mem_attributes (op0, exp, 0);
4538 if (TREE_CODE (exp) == MEM_REF
4539 && !is_gimple_mem_ref_addr (TREE_OPERAND (exp, 0)))
4540 set_mem_expr (op0, NULL_TREE);
4541 set_mem_addr_space (op0, as);
4542
4543 return op0;
4544
4545 case TARGET_MEM_REF:
4546 if (TREE_CODE (TMR_BASE (exp)) == ADDR_EXPR
4547 && !DECL_RTL_SET_P (TREE_OPERAND (TMR_BASE (exp), 0)))
4548 return NULL;
4549
4550 op0 = expand_debug_expr
4551 (tree_mem_ref_addr (build_pointer_type (TREE_TYPE (exp)), exp));
4552 if (!op0)
4553 return NULL;
4554
4555 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
4556 op0 = convert_debug_memory_address (targetm.addr_space.address_mode (as),
4557 op0, as);
4558 if (op0 == NULL_RTX)
4559 return NULL;
4560
4561 op0 = gen_rtx_MEM (mode, op0);
4562
4563 set_mem_attributes (op0, exp, 0);
4564 set_mem_addr_space (op0, as);
4565
4566 return op0;
4567
4568 component_ref:
4569 case ARRAY_REF:
4570 case ARRAY_RANGE_REF:
4571 case COMPONENT_REF:
4572 case BIT_FIELD_REF:
4573 case REALPART_EXPR:
4574 case IMAGPART_EXPR:
4575 case VIEW_CONVERT_EXPR:
4576 {
4577 machine_mode mode1;
4578 poly_int64 bitsize, bitpos;
4579 tree offset;
4580 int reversep, volatilep = 0;
4581 tree tem
4582 = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
4583 &unsignedp, &reversep, &volatilep);
4584 rtx orig_op0;
4585
4586 if (known_eq (bitsize, 0))
4587 return NULL;
4588
4589 orig_op0 = op0 = expand_debug_expr (tem);
4590
4591 if (!op0)
4592 return NULL;
4593
4594 if (offset)
4595 {
4596 machine_mode addrmode, offmode;
4597
4598 if (!MEM_P (op0))
4599 return NULL;
4600
4601 op0 = XEXP (op0, 0);
4602 addrmode = GET_MODE (op0);
4603 if (addrmode == VOIDmode)
4604 addrmode = Pmode;
4605
4606 op1 = expand_debug_expr (offset);
4607 if (!op1)
4608 return NULL;
4609
4610 offmode = GET_MODE (op1);
4611 if (offmode == VOIDmode)
4612 offmode = TYPE_MODE (TREE_TYPE (offset));
4613
4614 if (addrmode != offmode)
4615 op1 = lowpart_subreg (addrmode, op1, offmode);
4616
4617 /* Don't use offset_address here; we don't need a
4618 recognizable address, and we don't want to generate
4619 code. */
4620 op0 = gen_rtx_MEM (mode, simplify_gen_binary (PLUS, addrmode,
4621 op0, op1));
4622 }
4623
4624 if (MEM_P (op0))
4625 {
4626 if (mode1 == VOIDmode)
4627 {
4628 if (maybe_gt (bitsize, MAX_BITSIZE_MODE_ANY_INT))
4629 return NULL;
4630 /* Bitfield. */
4631 mode1 = smallest_int_mode_for_size (bitsize);
4632 }
4633 poly_int64 bytepos = bits_to_bytes_round_down (bitpos);
4634 if (maybe_ne (bytepos, 0))
4635 {
4636 op0 = adjust_address_nv (op0, mode1, bytepos);
4637 bitpos = num_trailing_bits (bitpos);
4638 }
4639 else if (known_eq (bitpos, 0)
4640 && known_eq (bitsize, GET_MODE_BITSIZE (mode)))
4641 op0 = adjust_address_nv (op0, mode, 0);
4642 else if (GET_MODE (op0) != mode1)
4643 op0 = adjust_address_nv (op0, mode1, 0);
4644 else
4645 op0 = copy_rtx (op0);
4646 if (op0 == orig_op0)
4647 op0 = shallow_copy_rtx (op0);
4648 set_mem_attributes (op0, exp, 0);
4649 }
4650
4651 if (known_eq (bitpos, 0) && mode == GET_MODE (op0))
4652 return op0;
4653
4654 if (maybe_lt (bitpos, 0))
4655 return NULL;
4656
4657 if (GET_MODE (op0) == BLKmode || mode == BLKmode)
4658 return NULL;
4659
4660 poly_int64 bytepos;
4661 if (multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
4662 && known_eq (bitsize, GET_MODE_BITSIZE (mode1)))
4663 {
4664 machine_mode opmode = GET_MODE (op0);
4665
4666 if (opmode == VOIDmode)
4667 opmode = TYPE_MODE (TREE_TYPE (tem));
4668
4669 /* This condition may hold if we're expanding the address
4670 right past the end of an array that turned out not to
4671 be addressable (i.e., the address was only computed in
4672 debug stmts). The gen_subreg below would rightfully
4673 crash, and the address doesn't really exist, so just
4674 drop it. */
4675 if (known_ge (bitpos, GET_MODE_BITSIZE (opmode)))
4676 return NULL;
4677
4678 if (multiple_p (bitpos, GET_MODE_BITSIZE (mode)))
4679 return simplify_gen_subreg (mode, op0, opmode, bytepos);
4680 }
4681
4682 return simplify_gen_ternary (SCALAR_INT_MODE_P (GET_MODE (op0))
4683 && TYPE_UNSIGNED (TREE_TYPE (exp))
4684 ? SIGN_EXTRACT
4685 : ZERO_EXTRACT, mode,
4686 GET_MODE (op0) != VOIDmode
4687 ? GET_MODE (op0)
4688 : TYPE_MODE (TREE_TYPE (tem)),
4689 op0, gen_int_mode (bitsize, word_mode),
4690 gen_int_mode (bitpos, word_mode));
4691 }
4692
4693 case ABS_EXPR:
4694 case ABSU_EXPR:
4695 return simplify_gen_unary (ABS, mode, op0, mode);
4696
4697 case NEGATE_EXPR:
4698 return simplify_gen_unary (NEG, mode, op0, mode);
4699
4700 case BIT_NOT_EXPR:
4701 return simplify_gen_unary (NOT, mode, op0, mode);
4702
4703 case FLOAT_EXPR:
4704 return simplify_gen_unary (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp,
4705 0)))
4706 ? UNSIGNED_FLOAT : FLOAT, mode, op0,
4707 inner_mode);
4708
4709 case FIX_TRUNC_EXPR:
4710 return simplify_gen_unary (unsignedp ? UNSIGNED_FIX : FIX, mode, op0,
4711 inner_mode);
4712
4713 case POINTER_PLUS_EXPR:
4714 /* For the rare target where pointers are not the same size as
4715 size_t, we need to check for mis-matched modes and correct
4716 the addend. */
4717 if (op0 && op1
4718 && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
4719 && is_a <scalar_int_mode> (GET_MODE (op1), &op1_mode)
4720 && op0_mode != op1_mode)
4721 {
4722 if (GET_MODE_BITSIZE (op0_mode) < GET_MODE_BITSIZE (op1_mode)
4723 /* If OP0 is a partial mode, then we must truncate, even
4724 if it has the same bitsize as OP1 as GCC's
4725 representation of partial modes is opaque. */
4726 || (GET_MODE_CLASS (op0_mode) == MODE_PARTIAL_INT
4727 && (GET_MODE_BITSIZE (op0_mode)
4728 == GET_MODE_BITSIZE (op1_mode))))
4729 op1 = simplify_gen_unary (TRUNCATE, op0_mode, op1, op1_mode);
4730 else
4731 /* We always sign-extend, regardless of the signedness of
4732 the operand, because the operand is always unsigned
4733 here even if the original C expression is signed. */
4734 op1 = simplify_gen_unary (SIGN_EXTEND, op0_mode, op1, op1_mode);
4735 }
4736 /* Fall through. */
4737 case PLUS_EXPR:
4738 return simplify_gen_binary (PLUS, mode, op0, op1);
4739
4740 case MINUS_EXPR:
4741 case POINTER_DIFF_EXPR:
4742 return simplify_gen_binary (MINUS, mode, op0, op1);
4743
4744 case MULT_EXPR:
4745 return simplify_gen_binary (MULT, mode, op0, op1);
4746
4747 case RDIV_EXPR:
4748 case TRUNC_DIV_EXPR:
4749 case EXACT_DIV_EXPR:
4750 if (unsignedp)
4751 return simplify_gen_binary (UDIV, mode, op0, op1);
4752 else
4753 return simplify_gen_binary (DIV, mode, op0, op1);
4754
4755 case TRUNC_MOD_EXPR:
4756 return simplify_gen_binary (unsignedp ? UMOD : MOD, mode, op0, op1);
4757
4758 case FLOOR_DIV_EXPR:
4759 if (unsignedp)
4760 return simplify_gen_binary (UDIV, mode, op0, op1);
4761 else
4762 {
4763 rtx div = simplify_gen_binary (DIV, mode, op0, op1);
4764 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4765 rtx adj = floor_sdiv_adjust (mode, mod, op1);
4766 return simplify_gen_binary (PLUS, mode, div, adj);
4767 }
4768
4769 case FLOOR_MOD_EXPR:
4770 if (unsignedp)
4771 return simplify_gen_binary (UMOD, mode, op0, op1);
4772 else
4773 {
4774 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4775 rtx adj = floor_sdiv_adjust (mode, mod, op1);
4776 adj = simplify_gen_unary (NEG, mode,
4777 simplify_gen_binary (MULT, mode, adj, op1),
4778 mode);
4779 return simplify_gen_binary (PLUS, mode, mod, adj);
4780 }
4781
4782 case CEIL_DIV_EXPR:
4783 if (unsignedp)
4784 {
4785 rtx div = simplify_gen_binary (UDIV, mode, op0, op1);
4786 rtx mod = simplify_gen_binary (UMOD, mode, op0, op1);
4787 rtx adj = ceil_udiv_adjust (mode, mod, op1);
4788 return simplify_gen_binary (PLUS, mode, div, adj);
4789 }
4790 else
4791 {
4792 rtx div = simplify_gen_binary (DIV, mode, op0, op1);
4793 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4794 rtx adj = ceil_sdiv_adjust (mode, mod, op1);
4795 return simplify_gen_binary (PLUS, mode, div, adj);
4796 }
4797
4798 case CEIL_MOD_EXPR:
4799 if (unsignedp)
4800 {
4801 rtx mod = simplify_gen_binary (UMOD, mode, op0, op1);
4802 rtx adj = ceil_udiv_adjust (mode, mod, op1);
4803 adj = simplify_gen_unary (NEG, mode,
4804 simplify_gen_binary (MULT, mode, adj, op1),
4805 mode);
4806 return simplify_gen_binary (PLUS, mode, mod, adj);
4807 }
4808 else
4809 {
4810 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4811 rtx adj = ceil_sdiv_adjust (mode, mod, op1);
4812 adj = simplify_gen_unary (NEG, mode,
4813 simplify_gen_binary (MULT, mode, adj, op1),
4814 mode);
4815 return simplify_gen_binary (PLUS, mode, mod, adj);
4816 }
4817
4818 case ROUND_DIV_EXPR:
4819 if (unsignedp)
4820 {
4821 rtx div = simplify_gen_binary (UDIV, mode, op0, op1);
4822 rtx mod = simplify_gen_binary (UMOD, mode, op0, op1);
4823 rtx adj = round_udiv_adjust (mode, mod, op1);
4824 return simplify_gen_binary (PLUS, mode, div, adj);
4825 }
4826 else
4827 {
4828 rtx div = simplify_gen_binary (DIV, mode, op0, op1);
4829 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4830 rtx adj = round_sdiv_adjust (mode, mod, op1);
4831 return simplify_gen_binary (PLUS, mode, div, adj);
4832 }
4833
4834 case ROUND_MOD_EXPR:
4835 if (unsignedp)
4836 {
4837 rtx mod = simplify_gen_binary (UMOD, mode, op0, op1);
4838 rtx adj = round_udiv_adjust (mode, mod, op1);
4839 adj = simplify_gen_unary (NEG, mode,
4840 simplify_gen_binary (MULT, mode, adj, op1),
4841 mode);
4842 return simplify_gen_binary (PLUS, mode, mod, adj);
4843 }
4844 else
4845 {
4846 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4847 rtx adj = round_sdiv_adjust (mode, mod, op1);
4848 adj = simplify_gen_unary (NEG, mode,
4849 simplify_gen_binary (MULT, mode, adj, op1),
4850 mode);
4851 return simplify_gen_binary (PLUS, mode, mod, adj);
4852 }
4853
4854 case LSHIFT_EXPR:
4855 return simplify_gen_binary (ASHIFT, mode, op0, op1);
4856
4857 case RSHIFT_EXPR:
4858 if (unsignedp)
4859 return simplify_gen_binary (LSHIFTRT, mode, op0, op1);
4860 else
4861 return simplify_gen_binary (ASHIFTRT, mode, op0, op1);
4862
4863 case LROTATE_EXPR:
4864 return simplify_gen_binary (ROTATE, mode, op0, op1);
4865
4866 case RROTATE_EXPR:
4867 return simplify_gen_binary (ROTATERT, mode, op0, op1);
4868
4869 case MIN_EXPR:
4870 return simplify_gen_binary (unsignedp ? UMIN : SMIN, mode, op0, op1);
4871
4872 case MAX_EXPR:
4873 return simplify_gen_binary (unsignedp ? UMAX : SMAX, mode, op0, op1);
4874
4875 case BIT_AND_EXPR:
4876 case TRUTH_AND_EXPR:
4877 return simplify_gen_binary (AND, mode, op0, op1);
4878
4879 case BIT_IOR_EXPR:
4880 case TRUTH_OR_EXPR:
4881 return simplify_gen_binary (IOR, mode, op0, op1);
4882
4883 case BIT_XOR_EXPR:
4884 case TRUTH_XOR_EXPR:
4885 return simplify_gen_binary (XOR, mode, op0, op1);
4886
4887 case TRUTH_ANDIF_EXPR:
4888 return gen_rtx_IF_THEN_ELSE (mode, op0, op1, const0_rtx);
4889
4890 case TRUTH_ORIF_EXPR:
4891 return gen_rtx_IF_THEN_ELSE (mode, op0, const_true_rtx, op1);
4892
4893 case TRUTH_NOT_EXPR:
4894 return simplify_gen_relational (EQ, mode, inner_mode, op0, const0_rtx);
4895
4896 case LT_EXPR:
4897 return simplify_gen_relational (unsignedp ? LTU : LT, mode, inner_mode,
4898 op0, op1);
4899
4900 case LE_EXPR:
4901 return simplify_gen_relational (unsignedp ? LEU : LE, mode, inner_mode,
4902 op0, op1);
4903
4904 case GT_EXPR:
4905 return simplify_gen_relational (unsignedp ? GTU : GT, mode, inner_mode,
4906 op0, op1);
4907
4908 case GE_EXPR:
4909 return simplify_gen_relational (unsignedp ? GEU : GE, mode, inner_mode,
4910 op0, op1);
4911
4912 case EQ_EXPR:
4913 return simplify_gen_relational (EQ, mode, inner_mode, op0, op1);
4914
4915 case NE_EXPR:
4916 return simplify_gen_relational (NE, mode, inner_mode, op0, op1);
4917
4918 case UNORDERED_EXPR:
4919 return simplify_gen_relational (UNORDERED, mode, inner_mode, op0, op1);
4920
4921 case ORDERED_EXPR:
4922 return simplify_gen_relational (ORDERED, mode, inner_mode, op0, op1);
4923
4924 case UNLT_EXPR:
4925 return simplify_gen_relational (UNLT, mode, inner_mode, op0, op1);
4926
4927 case UNLE_EXPR:
4928 return simplify_gen_relational (UNLE, mode, inner_mode, op0, op1);
4929
4930 case UNGT_EXPR:
4931 return simplify_gen_relational (UNGT, mode, inner_mode, op0, op1);
4932
4933 case UNGE_EXPR:
4934 return simplify_gen_relational (UNGE, mode, inner_mode, op0, op1);
4935
4936 case UNEQ_EXPR:
4937 return simplify_gen_relational (UNEQ, mode, inner_mode, op0, op1);
4938
4939 case LTGT_EXPR:
4940 return simplify_gen_relational (LTGT, mode, inner_mode, op0, op1);
4941
4942 case COND_EXPR:
4943 return gen_rtx_IF_THEN_ELSE (mode, op0, op1, op2);
4944
4945 case COMPLEX_EXPR:
4946 gcc_assert (COMPLEX_MODE_P (mode));
4947 if (GET_MODE (op0) == VOIDmode)
4948 op0 = gen_rtx_CONST (GET_MODE_INNER (mode), op0);
4949 if (GET_MODE (op1) == VOIDmode)
4950 op1 = gen_rtx_CONST (GET_MODE_INNER (mode), op1);
4951 return gen_rtx_CONCAT (mode, op0, op1);
4952
4953 case CONJ_EXPR:
4954 if (GET_CODE (op0) == CONCAT)
4955 return gen_rtx_CONCAT (mode, XEXP (op0, 0),
4956 simplify_gen_unary (NEG, GET_MODE_INNER (mode),
4957 XEXP (op0, 1),
4958 GET_MODE_INNER (mode)));
4959 else
4960 {
4961 scalar_mode imode = GET_MODE_INNER (mode);
4962 rtx re, im;
4963
4964 if (MEM_P (op0))
4965 {
4966 re = adjust_address_nv (op0, imode, 0);
4967 im = adjust_address_nv (op0, imode, GET_MODE_SIZE (imode));
4968 }
4969 else
4970 {
4971 scalar_int_mode ifmode;
4972 scalar_int_mode ihmode;
4973 rtx halfsize;
4974 if (!int_mode_for_mode (mode).exists (&ifmode)
4975 || !int_mode_for_mode (imode).exists (&ihmode))
4976 return NULL;
4977 halfsize = GEN_INT (GET_MODE_BITSIZE (ihmode));
4978 re = op0;
4979 if (mode != ifmode)
4980 re = gen_rtx_SUBREG (ifmode, re, 0);
4981 re = gen_rtx_ZERO_EXTRACT (ihmode, re, halfsize, const0_rtx);
4982 if (imode != ihmode)
4983 re = gen_rtx_SUBREG (imode, re, 0);
4984 im = copy_rtx (op0);
4985 if (mode != ifmode)
4986 im = gen_rtx_SUBREG (ifmode, im, 0);
4987 im = gen_rtx_ZERO_EXTRACT (ihmode, im, halfsize, halfsize);
4988 if (imode != ihmode)
4989 im = gen_rtx_SUBREG (imode, im, 0);
4990 }
4991 im = gen_rtx_NEG (imode, im);
4992 return gen_rtx_CONCAT (mode, re, im);
4993 }
4994
4995 case ADDR_EXPR:
4996 op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
4997 if (!op0 || !MEM_P (op0))
4998 {
4999 if ((TREE_CODE (TREE_OPERAND (exp, 0)) == VAR_DECL
5000 || TREE_CODE (TREE_OPERAND (exp, 0)) == PARM_DECL
5001 || TREE_CODE (TREE_OPERAND (exp, 0)) == RESULT_DECL)
5002 && (!TREE_ADDRESSABLE (TREE_OPERAND (exp, 0))
5003 || target_for_debug_bind (TREE_OPERAND (exp, 0))))
5004 return gen_rtx_DEBUG_IMPLICIT_PTR (mode, TREE_OPERAND (exp, 0));
5005
5006 if (handled_component_p (TREE_OPERAND (exp, 0)))
5007 {
5008 poly_int64 bitoffset, bitsize, maxsize, byteoffset;
5009 bool reverse;
5010 tree decl
5011 = get_ref_base_and_extent (TREE_OPERAND (exp, 0), &bitoffset,
5012 &bitsize, &maxsize, &reverse);
5013 if ((VAR_P (decl)
5014 || TREE_CODE (decl) == PARM_DECL
5015 || TREE_CODE (decl) == RESULT_DECL)
5016 && (!TREE_ADDRESSABLE (decl)
5017 || target_for_debug_bind (decl))
5018 && multiple_p (bitoffset, BITS_PER_UNIT, &byteoffset)
5019 && known_gt (bitsize, 0)
5020 && known_eq (bitsize, maxsize))
5021 {
5022 rtx base = gen_rtx_DEBUG_IMPLICIT_PTR (mode, decl);
5023 return plus_constant (mode, base, byteoffset);
5024 }
5025 }
5026
5027 if (TREE_CODE (TREE_OPERAND (exp, 0)) == MEM_REF
5028 && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
5029 == ADDR_EXPR)
5030 {
5031 op0 = expand_debug_expr (TREE_OPERAND (TREE_OPERAND (exp, 0),
5032 0));
5033 if (op0 != NULL
5034 && (GET_CODE (op0) == DEBUG_IMPLICIT_PTR
5035 || (GET_CODE (op0) == PLUS
5036 && GET_CODE (XEXP (op0, 0)) == DEBUG_IMPLICIT_PTR
5037 && CONST_INT_P (XEXP (op0, 1)))))
5038 {
5039 op1 = expand_debug_expr (TREE_OPERAND (TREE_OPERAND (exp, 0),
5040 1));
5041 poly_int64 offset;
5042 if (!op1 || !poly_int_rtx_p (op1, &offset))
5043 return NULL;
5044
5045 return plus_constant (mode, op0, offset);
5046 }
5047 }
5048
5049 return NULL;
5050 }
5051
5052 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
5053 addr_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (exp));
5054 op0 = convert_debug_memory_address (addr_mode, XEXP (op0, 0), as);
5055
5056 return op0;
5057
5058 case VECTOR_CST:
5059 {
5060 unsigned HOST_WIDE_INT i, nelts;
5061
5062 if (!VECTOR_CST_NELTS (exp).is_constant (&nelts))
5063 return NULL;
5064
5065 op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
5066
5067 for (i = 0; i < nelts; ++i)
5068 {
5069 op1 = expand_debug_expr (VECTOR_CST_ELT (exp, i));
5070 if (!op1)
5071 return NULL;
5072 XVECEXP (op0, 0, i) = op1;
5073 }
5074
5075 return op0;
5076 }
5077
5078 case CONSTRUCTOR:
5079 if (TREE_CLOBBER_P (exp))
5080 return NULL;
5081 else if (TREE_CODE (TREE_TYPE (exp)) == VECTOR_TYPE)
5082 {
5083 unsigned i;
5084 unsigned HOST_WIDE_INT nelts;
5085 tree val;
5086
5087 if (!TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)).is_constant (&nelts))
5088 goto flag_unsupported;
5089
5090 op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
5091
5092 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), i, val)
5093 {
5094 op1 = expand_debug_expr (val);
5095 if (!op1)
5096 return NULL;
5097 XVECEXP (op0, 0, i) = op1;
5098 }
5099
5100 if (i < nelts)
5101 {
5102 op1 = expand_debug_expr
5103 (build_zero_cst (TREE_TYPE (TREE_TYPE (exp))));
5104
5105 if (!op1)
5106 return NULL;
5107
5108 for (; i < nelts; i++)
5109 XVECEXP (op0, 0, i) = op1;
5110 }
5111
5112 return op0;
5113 }
5114 else
5115 goto flag_unsupported;
5116
5117 case CALL_EXPR:
5118 /* ??? Maybe handle some builtins? */
5119 return NULL;
5120
5121 case SSA_NAME:
5122 {
5123 gimple *g = get_gimple_for_ssa_name (exp);
5124 if (g)
5125 {
5126 tree t = NULL_TREE;
5127 if (deep_ter_debug_map)
5128 {
5129 tree *slot = deep_ter_debug_map->get (exp);
5130 if (slot)
5131 t = *slot;
5132 }
5133 if (t == NULL_TREE)
5134 t = gimple_assign_rhs_to_tree (g);
5135 op0 = expand_debug_expr (t);
5136 if (!op0)
5137 return NULL;
5138 }
5139 else
5140 {
5141 /* If this is a reference to an incoming value of
5142 a parameter that is never used in the code or where the
5143 incoming value is never used in the code, use
5144 PARM_DECL's DECL_RTL if set. */
5145 if (SSA_NAME_IS_DEFAULT_DEF (exp)
5146 && SSA_NAME_VAR (exp)
5147 && TREE_CODE (SSA_NAME_VAR (exp)) == PARM_DECL
5148 && has_zero_uses (exp))
5149 {
5150 op0 = expand_debug_parm_decl (SSA_NAME_VAR (exp));
5151 if (op0)
5152 goto adjust_mode;
5153 op0 = expand_debug_expr (SSA_NAME_VAR (exp));
5154 if (op0)
5155 goto adjust_mode;
5156 }
5157
5158 int part = var_to_partition (SA.map, exp);
5159
5160 if (part == NO_PARTITION)
5161 return NULL;
5162
5163 gcc_assert (part >= 0 && (unsigned)part < SA.map->num_partitions);
5164
5165 op0 = copy_rtx (SA.partition_to_pseudo[part]);
5166 }
5167 goto adjust_mode;
5168 }
5169
5170 case ERROR_MARK:
5171 return NULL;
5172
5173 /* Vector stuff. For most of these codes we don't have corresponding rtl codes. */
5174 case REALIGN_LOAD_EXPR:
5175 case VEC_COND_EXPR:
5176 case VEC_PACK_FIX_TRUNC_EXPR:
5177 case VEC_PACK_FLOAT_EXPR:
5178 case VEC_PACK_SAT_EXPR:
5179 case VEC_PACK_TRUNC_EXPR:
5180 case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
5181 case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
5182 case VEC_UNPACK_FLOAT_HI_EXPR:
5183 case VEC_UNPACK_FLOAT_LO_EXPR:
5184 case VEC_UNPACK_HI_EXPR:
5185 case VEC_UNPACK_LO_EXPR:
5186 case VEC_WIDEN_MULT_HI_EXPR:
5187 case VEC_WIDEN_MULT_LO_EXPR:
5188 case VEC_WIDEN_MULT_EVEN_EXPR:
5189 case VEC_WIDEN_MULT_ODD_EXPR:
5190 case VEC_WIDEN_LSHIFT_HI_EXPR:
5191 case VEC_WIDEN_LSHIFT_LO_EXPR:
5192 case VEC_PERM_EXPR:
5193 case VEC_DUPLICATE_EXPR:
5194 case VEC_SERIES_EXPR:
5195 return NULL;
5196
5197 /* Misc codes. */
5198 case ADDR_SPACE_CONVERT_EXPR:
5199 case FIXED_CONVERT_EXPR:
5200 case OBJ_TYPE_REF:
5201 case WITH_SIZE_EXPR:
5202 case BIT_INSERT_EXPR:
5203 return NULL;
5204
5205 case DOT_PROD_EXPR:
5206 if (SCALAR_INT_MODE_P (GET_MODE (op0))
5207 && SCALAR_INT_MODE_P (mode))
5208 {
5209 op0
5210 = simplify_gen_unary (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp,
5211 0)))
5212 ? ZERO_EXTEND : SIGN_EXTEND, mode, op0,
5213 inner_mode);
5214 op1
5215 = simplify_gen_unary (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp,
5216 1)))
5217 ? ZERO_EXTEND : SIGN_EXTEND, mode, op1,
5218 inner_mode);
5219 op0 = simplify_gen_binary (MULT, mode, op0, op1);
5220 return simplify_gen_binary (PLUS, mode, op0, op2);
5221 }
5222 return NULL;
5223
5224 case WIDEN_MULT_EXPR:
5225 case WIDEN_MULT_PLUS_EXPR:
5226 case WIDEN_MULT_MINUS_EXPR:
5227 if (SCALAR_INT_MODE_P (GET_MODE (op0))
5228 && SCALAR_INT_MODE_P (mode))
5229 {
5230 inner_mode = GET_MODE (op0);
5231 if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))))
5232 op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode);
5233 else
5234 op0 = simplify_gen_unary (SIGN_EXTEND, mode, op0, inner_mode);
5235 if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1))))
5236 op1 = simplify_gen_unary (ZERO_EXTEND, mode, op1, inner_mode);
5237 else
5238 op1 = simplify_gen_unary (SIGN_EXTEND, mode, op1, inner_mode);
5239 op0 = simplify_gen_binary (MULT, mode, op0, op1);
5240 if (TREE_CODE (exp) == WIDEN_MULT_EXPR)
5241 return op0;
5242 else if (TREE_CODE (exp) == WIDEN_MULT_PLUS_EXPR)
5243 return simplify_gen_binary (PLUS, mode, op0, op2);
5244 else
5245 return simplify_gen_binary (MINUS, mode, op2, op0);
5246 }
5247 return NULL;
5248
5249 case MULT_HIGHPART_EXPR:
5250 /* ??? Similar to the above. */
5251 return NULL;
5252
5253 case WIDEN_SUM_EXPR:
5254 case WIDEN_LSHIFT_EXPR:
5255 if (SCALAR_INT_MODE_P (GET_MODE (op0))
5256 && SCALAR_INT_MODE_P (mode))
5257 {
5258 op0
5259 = simplify_gen_unary (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp,
5260 0)))
5261 ? ZERO_EXTEND : SIGN_EXTEND, mode, op0,
5262 inner_mode);
5263 return simplify_gen_binary (TREE_CODE (exp) == WIDEN_LSHIFT_EXPR
5264 ? ASHIFT : PLUS, mode, op0, op1);
5265 }
5266 return NULL;
5267
5268 default:
5269 flag_unsupported:
5270 if (flag_checking)
5271 {
5272 debug_tree (exp);
5273 gcc_unreachable ();
5274 }
5275 return NULL;
5276 }
5277 }
5278
5279 /* Return an RTX equivalent to the source bind value of the tree expression
5280 EXP. */
5281
5282 static rtx
5283 expand_debug_source_expr (tree exp)
5284 {
5285 rtx op0 = NULL_RTX;
5286 machine_mode mode = VOIDmode, inner_mode;
5287
5288 switch (TREE_CODE (exp))
5289 {
5290 case VAR_DECL:
5291 if (DECL_ABSTRACT_ORIGIN (exp))
5292 return expand_debug_source_expr (DECL_ABSTRACT_ORIGIN (exp));
5293 break;
5294 case PARM_DECL:
5295 {
5296 mode = DECL_MODE (exp);
5297 op0 = expand_debug_parm_decl (exp);
5298 if (op0)
5299 break;
5300 /* See whether this is an argument that has been completely
5301 optimized out. */
5302 if (!DECL_RTL_SET_P (exp)
5303 && !DECL_INCOMING_RTL (exp)
5304 && DECL_ABSTRACT_ORIGIN (current_function_decl))
5305 {
5306 tree aexp = DECL_ORIGIN (exp);
5307 if (DECL_CONTEXT (aexp)
5308 == DECL_ABSTRACT_ORIGIN (current_function_decl))
5309 {
5310 vec<tree, va_gc> **debug_args;
5311 unsigned int ix;
5312 tree ddecl;
5313 debug_args = decl_debug_args_lookup (current_function_decl);
5314 if (debug_args != NULL)
5315 {
5316 for (ix = 0; vec_safe_iterate (*debug_args, ix, &ddecl);
5317 ix += 2)
5318 if (ddecl == aexp)
5319 return gen_rtx_DEBUG_PARAMETER_REF (mode, aexp);
5320 }
5321 }
5322 }
5323 break;
5324 }
5325 default:
5326 break;
5327 }
5328
5329 if (op0 == NULL_RTX)
5330 return NULL_RTX;
5331
5332 inner_mode = GET_MODE (op0);
5333 if (mode == inner_mode)
5334 return op0;
5335
5336 if (FLOAT_MODE_P (mode) && FLOAT_MODE_P (inner_mode))
5337 {
5338 if (GET_MODE_UNIT_BITSIZE (mode)
5339 == GET_MODE_UNIT_BITSIZE (inner_mode))
5340 op0 = simplify_gen_subreg (mode, op0, inner_mode, 0);
5341 else if (GET_MODE_UNIT_BITSIZE (mode)
5342 < GET_MODE_UNIT_BITSIZE (inner_mode))
5343 op0 = simplify_gen_unary (FLOAT_TRUNCATE, mode, op0, inner_mode);
5344 else
5345 op0 = simplify_gen_unary (FLOAT_EXTEND, mode, op0, inner_mode);
5346 }
5347 else if (FLOAT_MODE_P (mode))
5348 gcc_unreachable ();
5349 else if (FLOAT_MODE_P (inner_mode))
5350 {
5351 if (TYPE_UNSIGNED (TREE_TYPE (exp)))
5352 op0 = simplify_gen_unary (UNSIGNED_FIX, mode, op0, inner_mode);
5353 else
5354 op0 = simplify_gen_unary (FIX, mode, op0, inner_mode);
5355 }
5356 else if (GET_MODE_UNIT_PRECISION (mode)
5357 == GET_MODE_UNIT_PRECISION (inner_mode))
5358 op0 = lowpart_subreg (mode, op0, inner_mode);
5359 else if (GET_MODE_UNIT_PRECISION (mode)
5360 < GET_MODE_UNIT_PRECISION (inner_mode))
5361 op0 = simplify_gen_unary (TRUNCATE, mode, op0, inner_mode);
5362 else if (TYPE_UNSIGNED (TREE_TYPE (exp)))
5363 op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode);
5364 else
5365 op0 = simplify_gen_unary (SIGN_EXTEND, mode, op0, inner_mode);
5366
5367 return op0;
5368 }
5369
5370 /* Ensure INSN_VAR_LOCATION_LOC (insn) doesn't have unbounded complexity.
5371 Allow 4 levels of rtl nesting for most rtl codes, and if we see anything
5372 deeper than that, create DEBUG_EXPRs and emit DEBUG_INSNs before INSN. */
5373
5374 static void
5375 avoid_complex_debug_insns (rtx_insn *insn, rtx *exp_p, int depth)
5376 {
5377 rtx exp = *exp_p;
5378
5379 if (exp == NULL_RTX)
5380 return;
5381
5382 if ((OBJECT_P (exp) && !MEM_P (exp)) || GET_CODE (exp) == CLOBBER)
5383 return;
5384
5385 if (depth == 4)
5386 {
5387 /* Create DEBUG_EXPR (and DEBUG_EXPR_DECL). */
5388 rtx dval = make_debug_expr_from_rtl (exp);
5389
5390 /* Emit a debug bind insn before INSN. */
5391 rtx bind = gen_rtx_VAR_LOCATION (GET_MODE (exp),
5392 DEBUG_EXPR_TREE_DECL (dval), exp,
5393 VAR_INIT_STATUS_INITIALIZED);
5394
5395 emit_debug_insn_before (bind, insn);
5396 *exp_p = dval;
5397 return;
5398 }
5399
5400 const char *format_ptr = GET_RTX_FORMAT (GET_CODE (exp));
5401 int i, j;
5402 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (exp)); i++)
5403 switch (*format_ptr++)
5404 {
5405 case 'e':
5406 avoid_complex_debug_insns (insn, &XEXP (exp, i), depth + 1);
5407 break;
5408
5409 case 'E':
5410 case 'V':
5411 for (j = 0; j < XVECLEN (exp, i); j++)
5412 avoid_complex_debug_insns (insn, &XVECEXP (exp, i, j), depth + 1);
5413 break;
5414
5415 default:
5416 break;
5417 }
5418 }
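
/* Illustrative sketch only, not GCC code: the depth cap above bounds the
   nesting of a location expression by naming any subexpression that sits
   four levels deep and continuing from that name, so later var-tracking
   work grows with the number of bind insns rather than with expression
   depth.  A hypothetical stand-alone analogue on a toy binary expression
   tree, assuming the node type declared here:  */

struct toy_expr
{
  struct toy_expr *lhs, *rhs;	/* both NULL for a leaf */
  int temp_id;			/* nonzero once split out into a temporary */
};

static int next_toy_temp_id = 1;

static void
split_deep_toy_expr (struct toy_expr *e, int depth)
{
  if (e == NULL || (e->lhs == NULL && e->rhs == NULL))
    return;			/* leaves are left alone */
  if (depth == 4)
    {
      /* Analogue of emitting a debug bind before INSN: give the subtree
	 a name and stop descending; the parent now refers to the name.  */
      e->temp_id = next_toy_temp_id++;
      return;
    }
  split_deep_toy_expr (e->lhs, depth + 1);
  split_deep_toy_expr (e->rhs, depth + 1);
}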
5419
5420 /* Expand the _LOCs in debug insns. We run this after expanding all
5421 regular insns, so that any variables referenced in the function
5422 will have their DECL_RTLs set. */
5423
5424 static void
5425 expand_debug_locations (void)
5426 {
5427 rtx_insn *insn;
5428 rtx_insn *last = get_last_insn ();
5429 int save_strict_alias = flag_strict_aliasing;
5430
5431 /* New alias sets while setting up memory attributes cause
5432 -fcompare-debug failures, even though they don't bring about any
5433 codegen changes. */
5434 flag_strict_aliasing = 0;
5435
5436 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5437 if (DEBUG_BIND_INSN_P (insn))
5438 {
5439 tree value = (tree)INSN_VAR_LOCATION_LOC (insn);
5440 rtx val;
5441 rtx_insn *prev_insn, *insn2;
5442 machine_mode mode;
5443
5444 if (value == NULL_TREE)
5445 val = NULL_RTX;
5446 else
5447 {
5448 if (INSN_VAR_LOCATION_STATUS (insn)
5449 == VAR_INIT_STATUS_UNINITIALIZED)
5450 val = expand_debug_source_expr (value);
5451 /* The avoid_deep_ter_for_debug function inserts
5452 debug bind stmts after SSA_NAME definition, with the
5453 SSA_NAME as the whole bind location. Temporarily disable
5454 expansion of that SSA_NAME into the DEBUG_EXPR_DECL
5455 being defined in this DEBUG_INSN. */
5456 else if (deep_ter_debug_map && TREE_CODE (value) == SSA_NAME)
5457 {
5458 tree *slot = deep_ter_debug_map->get (value);
5459 if (slot)
5460 {
5461 if (*slot == INSN_VAR_LOCATION_DECL (insn))
5462 *slot = NULL_TREE;
5463 else
5464 slot = NULL;
5465 }
5466 val = expand_debug_expr (value);
5467 if (slot)
5468 *slot = INSN_VAR_LOCATION_DECL (insn);
5469 }
5470 else
5471 val = expand_debug_expr (value);
5472 gcc_assert (last == get_last_insn ());
5473 }
5474
5475 if (!val)
5476 val = gen_rtx_UNKNOWN_VAR_LOC ();
5477 else
5478 {
5479 mode = GET_MODE (INSN_VAR_LOCATION (insn));
5480
5481 gcc_assert (mode == GET_MODE (val)
5482 || (GET_MODE (val) == VOIDmode
5483 && (CONST_SCALAR_INT_P (val)
5484 || GET_CODE (val) == CONST_FIXED
5485 || GET_CODE (val) == LABEL_REF)));
5486 }
5487
5488 INSN_VAR_LOCATION_LOC (insn) = val;
5489 prev_insn = PREV_INSN (insn);
5490 for (insn2 = insn; insn2 != prev_insn; insn2 = PREV_INSN (insn2))
5491 avoid_complex_debug_insns (insn2, &INSN_VAR_LOCATION_LOC (insn2), 0);
5492 }
5493
5494 flag_strict_aliasing = save_strict_alias;
5495 }
5496
5497 /* Swap the operands of commutative operations so that the more
5498 expensive operand is expanded first. */
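
/* An invented sketch of the effect, with statement costs from
   estimate_num_insns accumulated over TERed definitions (the numbers are
   only illustrative):

     t1_1 = x_0 * x_0;     cost 1
     t2_2 = t1_1 * x_0;    cost 1 + cost (t1_1) = 2
     r_3  = y_0 + t2_2;    commutative; y_0 has no TERed definition

   The operands of the addition are swapped to  r_3 = t2_2 + y_0,  so the
   costlier subexpression is expanded first.  */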
5499
5500 static void
5501 reorder_operands (basic_block bb)
5502 {
5503 unsigned int *lattice; /* Hold cost of each statement. */
5504 unsigned int i = 0, n = 0;
5505 gimple_stmt_iterator gsi;
5506 gimple_seq stmts;
5507 gimple *stmt;
5508 bool swap;
5509 tree op0, op1;
5510 ssa_op_iter iter;
5511 use_operand_p use_p;
5512 gimple *def0, *def1;
5513
5514 /* Compute cost of each statement using estimate_num_insns. */
5515 stmts = bb_seq (bb);
5516 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
5517 {
5518 stmt = gsi_stmt (gsi);
5519 if (!is_gimple_debug (stmt))
5520 gimple_set_uid (stmt, n++);
5521 }
5522 lattice = XNEWVEC (unsigned int, n);
5523 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
5524 {
5525 unsigned cost;
5526 stmt = gsi_stmt (gsi);
5527 if (is_gimple_debug (stmt))
5528 continue;
5529 cost = estimate_num_insns (stmt, &eni_size_weights);
5530 lattice[i] = cost;
5531 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
5532 {
5533 tree use = USE_FROM_PTR (use_p);
5534 gimple *def_stmt;
5535 if (TREE_CODE (use) != SSA_NAME)
5536 continue;
5537 def_stmt = get_gimple_for_ssa_name (use);
5538 if (!def_stmt)
5539 continue;
5540 lattice[i] += lattice[gimple_uid (def_stmt)];
5541 }
5542 i++;
5543 if (!is_gimple_assign (stmt)
5544 || !commutative_tree_code (gimple_assign_rhs_code (stmt)))
5545 continue;
5546 op0 = gimple_op (stmt, 1);
5547 op1 = gimple_op (stmt, 2);
5548 if (TREE_CODE (op0) != SSA_NAME
5549 || TREE_CODE (op1) != SSA_NAME)
5550 continue;
5551 /* Swap operands if the second one is more expensive. */
5552 def0 = get_gimple_for_ssa_name (op0);
5553 def1 = get_gimple_for_ssa_name (op1);
5554 if (!def1)
5555 continue;
5556 swap = false;
5557 if (!def0 || lattice[gimple_uid (def1)] > lattice[gimple_uid (def0)])
5558 swap = true;
5559 if (swap)
5560 {
5561 if (dump_file && (dump_flags & TDF_DETAILS))
5562 {
5563 fprintf (dump_file, "Swap operands in stmt:\n");
5564 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
5565 fprintf (dump_file, "Cost left opnd=%d, right opnd=%d\n",
5566 def0 ? lattice[gimple_uid (def0)] : 0,
5567 lattice[gimple_uid (def1)]);
5568 }
5569 swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
5570 gimple_assign_rhs2_ptr (stmt));
5571 }
5572 }
5573 XDELETE (lattice);
5574 }
5575
5576 /* Expand basic block BB from GIMPLE trees to RTL. */
5577
5578 static basic_block
5579 expand_gimple_basic_block (basic_block bb, bool disable_tail_calls)
5580 {
5581 gimple_stmt_iterator gsi;
5582 gimple_seq stmts;
5583 gimple *stmt = NULL;
5584 rtx_note *note = NULL;
5585 rtx_insn *last;
5586 edge e;
5587 edge_iterator ei;
5588
5589 if (dump_file)
5590 fprintf (dump_file, "\n;; Generating RTL for gimple basic block %d\n",
5591 bb->index);
5592
5593 /* Note that since we are now transitioning from GIMPLE to RTL, we
5594 cannot use the gsi_*_bb() routines because they expect the basic
5595 block to be in GIMPLE, instead of RTL. Therefore, we need to
5596 access the BB sequence directly. */
5597 if (optimize)
5598 reorder_operands (bb);
5599 stmts = bb_seq (bb);
5600 bb->il.gimple.seq = NULL;
5601 bb->il.gimple.phi_nodes = NULL;
5602 rtl_profile_for_bb (bb);
5603 init_rtl_bb_info (bb);
5604 bb->flags |= BB_RTL;
5605
5606 /* Remove the RETURN_EXPR if we may fall through to the exit
5607 instead. */
5608 gsi = gsi_last (stmts);
5609 if (!gsi_end_p (gsi)
5610 && gimple_code (gsi_stmt (gsi)) == GIMPLE_RETURN)
5611 {
5612 greturn *ret_stmt = as_a <greturn *> (gsi_stmt (gsi));
5613
5614 gcc_assert (single_succ_p (bb));
5615 gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun));
5616
5617 if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
5618 && !gimple_return_retval (ret_stmt))
5619 {
5620 gsi_remove (&gsi, false);
5621 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
5622 }
5623 }
5624
5625 gsi = gsi_start (stmts);
5626 if (!gsi_end_p (gsi))
5627 {
5628 stmt = gsi_stmt (gsi);
5629 if (gimple_code (stmt) != GIMPLE_LABEL)
5630 stmt = NULL;
5631 }
5632
5633 rtx_code_label **elt = lab_rtx_for_bb->get (bb);
5634
5635 if (stmt || elt)
5636 {
5637 gcc_checking_assert (!note);
5638 last = get_last_insn ();
5639
5640 if (stmt)
5641 {
5642 expand_gimple_stmt (stmt);
5643 gsi_next (&gsi);
5644 }
5645
5646 if (elt)
5647 emit_label (*elt);
5648
5649 BB_HEAD (bb) = NEXT_INSN (last);
5650 if (NOTE_P (BB_HEAD (bb)))
5651 BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
5652 gcc_assert (LABEL_P (BB_HEAD (bb)));
5653 note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb));
5654
5655 maybe_dump_rtl_for_gimple_stmt (stmt, last);
5656 }
5657 else
5658 BB_HEAD (bb) = note = emit_note (NOTE_INSN_BASIC_BLOCK);
5659
5660 if (note)
5661 NOTE_BASIC_BLOCK (note) = bb;
5662
5663 for (; !gsi_end_p (gsi); gsi_next (&gsi))
5664 {
5665 basic_block new_bb;
5666
5667 stmt = gsi_stmt (gsi);
5668
5669 /* If this statement is a non-debug one, and we generate debug
5670 insns, then this one might be the last real use of a TERed
5671 SSA_NAME, but where there are still some debug uses further
5672 down. Expanding the current SSA name in such further debug
5673 uses by their RHS might lead to wrong debug info, as coalescing
5674 might make the operands of such RHS be placed into the same
5675 pseudo as something else. Like so:
5676 a_1 = a_0 + 1; // Assume a_1 is TERed and a_0 is dead
5677 use(a_1);
5678 a_2 = ...
5679 #DEBUG ... => a_1
5680 As a_0 and a_2 don't overlap in lifetime, assume they are coalesced.
5681 If we were now to expand a_1 by its RHS (a_0 + 1) in the debug use,
5682 the write to a_2 would actually have clobbered the place which
5683 formerly held a_0.
5684
5685 So, instead of that, we recognize the situation, and generate
5686 debug temporaries at the last real use of TERed SSA names:
5687 a_1 = a_0 + 1;
5688 #DEBUG #D1 => a_1
5689 use(a_1);
5690 a_2 = ...
5691 #DEBUG ... => #D1
5692 */
5693 if (MAY_HAVE_DEBUG_BIND_INSNS
5694 && SA.values
5695 && !is_gimple_debug (stmt))
5696 {
5697 ssa_op_iter iter;
5698 tree op;
5699 gimple *def;
5700
5701 location_t sloc = curr_insn_location ();
5702
5703 /* Look for SSA names that have their last use here (TERed
5704 names always have only one real use). */
5705 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
5706 if ((def = get_gimple_for_ssa_name (op)))
5707 {
5708 imm_use_iterator imm_iter;
5709 use_operand_p use_p;
5710 bool have_debug_uses = false;
5711
5712 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
5713 {
5714 if (gimple_debug_bind_p (USE_STMT (use_p)))
5715 {
5716 have_debug_uses = true;
5717 break;
5718 }
5719 }
5720
5721 if (have_debug_uses)
5722 {
5723 /* OP is a TERed SSA name, with DEF its defining
5724 statement, and where OP is used in further debug
5725 instructions. Generate a debug temporary, and
5726 replace all uses of OP in debug insns with that
5727 temporary. */
5728 gimple *debugstmt;
5729 tree value = gimple_assign_rhs_to_tree (def);
5730 tree vexpr = make_node (DEBUG_EXPR_DECL);
5731 rtx val;
5732 machine_mode mode;
5733
5734 set_curr_insn_location (gimple_location (def));
5735
5736 DECL_ARTIFICIAL (vexpr) = 1;
5737 TREE_TYPE (vexpr) = TREE_TYPE (value);
5738 if (DECL_P (value))
5739 mode = DECL_MODE (value);
5740 else
5741 mode = TYPE_MODE (TREE_TYPE (value));
5742 SET_DECL_MODE (vexpr, mode);
5743
5744 val = gen_rtx_VAR_LOCATION
5745 (mode, vexpr, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
5746
5747 emit_debug_insn (val);
5748
5749 FOR_EACH_IMM_USE_STMT (debugstmt, imm_iter, op)
5750 {
5751 if (!gimple_debug_bind_p (debugstmt))
5752 continue;
5753
5754 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5755 SET_USE (use_p, vexpr);
5756
5757 update_stmt (debugstmt);
5758 }
5759 }
5760 }
5761 set_curr_insn_location (sloc);
5762 }
5763
5764 currently_expanding_gimple_stmt = stmt;
5765
5766 /* Expand this statement, then evaluate the resulting RTL and
5767 fixup the CFG accordingly. */
5768 if (gimple_code (stmt) == GIMPLE_COND)
5769 {
5770 new_bb = expand_gimple_cond (bb, as_a <gcond *> (stmt));
5771 if (new_bb)
5772 return new_bb;
5773 }
5774 else if (is_gimple_debug (stmt))
5775 {
5776 location_t sloc = curr_insn_location ();
5777 gimple_stmt_iterator nsi = gsi;
5778
5779 for (;;)
5780 {
5781 tree var;
5782 tree value = NULL_TREE;
5783 rtx val = NULL_RTX;
5784 machine_mode mode;
5785
5786 if (!gimple_debug_nonbind_marker_p (stmt))
5787 {
5788 if (gimple_debug_bind_p (stmt))
5789 {
5790 var = gimple_debug_bind_get_var (stmt);
5791
5792 if (TREE_CODE (var) != DEBUG_EXPR_DECL
5793 && TREE_CODE (var) != LABEL_DECL
5794 && !target_for_debug_bind (var))
5795 goto delink_debug_stmt;
5796
5797 if (DECL_P (var))
5798 mode = DECL_MODE (var);
5799 else
5800 mode = TYPE_MODE (TREE_TYPE (var));
5801
5802 if (gimple_debug_bind_has_value_p (stmt))
5803 value = gimple_debug_bind_get_value (stmt);
5804
5805 val = gen_rtx_VAR_LOCATION
5806 (mode, var, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
5807 }
5808 else if (gimple_debug_source_bind_p (stmt))
5809 {
5810 var = gimple_debug_source_bind_get_var (stmt);
5811
5812 value = gimple_debug_source_bind_get_value (stmt);
5813
5814 mode = DECL_MODE (var);
5815
5816 val = gen_rtx_VAR_LOCATION (mode, var, (rtx)value,
5817 VAR_INIT_STATUS_UNINITIALIZED);
5818 }
5819 else
5820 gcc_unreachable ();
5821 }
5822 /* If this function was first compiled with markers
5823 enabled, but they're now disabled (e.g. LTO), drop
5824 them on the floor. */
5825 else if (gimple_debug_nonbind_marker_p (stmt)
5826 && !MAY_HAVE_DEBUG_MARKER_INSNS)
5827 goto delink_debug_stmt;
5828 else if (gimple_debug_begin_stmt_p (stmt))
5829 val = GEN_RTX_DEBUG_MARKER_BEGIN_STMT_PAT ();
5830 else if (gimple_debug_inline_entry_p (stmt))
5831 {
5832 tree block = gimple_block (stmt);
5833
5834 if (block)
5835 val = GEN_RTX_DEBUG_MARKER_INLINE_ENTRY_PAT ();
5836 else
5837 goto delink_debug_stmt;
5838 }
5839 else
5840 gcc_unreachable ();
5841
5842 last = get_last_insn ();
5843
5844 set_curr_insn_location (gimple_location (stmt));
5845
5846 emit_debug_insn (val);
5847
5848 if (dump_file && (dump_flags & TDF_DETAILS))
5849 {
5850 /* We can't dump the insn with a TREE where an RTX
5851 is expected. */
5852 if (GET_CODE (val) == VAR_LOCATION)
5853 {
5854 gcc_checking_assert (PAT_VAR_LOCATION_LOC (val) == (rtx)value);
5855 PAT_VAR_LOCATION_LOC (val) = const0_rtx;
5856 }
5857 maybe_dump_rtl_for_gimple_stmt (stmt, last);
5858 if (GET_CODE (val) == VAR_LOCATION)
5859 PAT_VAR_LOCATION_LOC (val) = (rtx)value;
5860 }
5861
5862 delink_debug_stmt:
5863 /* In order not to generate too many debug temporaries,
5864 we delink all uses of debug statements we already expanded.
5865 Therefore debug statements between definition and real
5866 use of TERed SSA names will continue to use the SSA name,
5867 and not be replaced with debug temps. */
5868 delink_stmt_imm_use (stmt);
5869
5870 gsi = nsi;
5871 gsi_next (&nsi);
5872 if (gsi_end_p (nsi))
5873 break;
5874 stmt = gsi_stmt (nsi);
5875 if (!is_gimple_debug (stmt))
5876 break;
5877 }
5878
5879 set_curr_insn_location (sloc);
5880 }
5881 else
5882 {
5883 gcall *call_stmt = dyn_cast <gcall *> (stmt);
5884 if (call_stmt
5885 && gimple_call_tail_p (call_stmt)
5886 && disable_tail_calls)
5887 gimple_call_set_tail (call_stmt, false);
5888
5889 if (call_stmt && gimple_call_tail_p (call_stmt))
5890 {
5891 bool can_fallthru;
5892 new_bb = expand_gimple_tailcall (bb, call_stmt, &can_fallthru);
5893 if (new_bb)
5894 {
5895 if (can_fallthru)
5896 bb = new_bb;
5897 else
5898 return new_bb;
5899 }
5900 }
5901 else
5902 {
5903 def_operand_p def_p;
5904 def_p = SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF);
5905
5906 if (def_p != NULL)
5907 {
5908 /* Ignore this stmt if it is in the list of
5909 replaceable expressions. */
5910 if (SA.values
5911 && bitmap_bit_p (SA.values,
5912 SSA_NAME_VERSION (DEF_FROM_PTR (def_p))))
5913 continue;
5914 }
5915 last = expand_gimple_stmt (stmt);
5916 maybe_dump_rtl_for_gimple_stmt (stmt, last);
5917 }
5918 }
5919 }
5920
5921 currently_expanding_gimple_stmt = NULL;
5922
5923 /* Expand implicit goto and convert goto_locus. */
5924 FOR_EACH_EDGE (e, ei, bb->succs)
5925 {
5926 if (e->goto_locus != UNKNOWN_LOCATION)
5927 set_curr_insn_location (e->goto_locus);
5928 if ((e->flags & EDGE_FALLTHRU) && e->dest != bb->next_bb)
5929 {
5930 emit_jump (label_rtx_for_bb (e->dest));
5931 e->flags &= ~EDGE_FALLTHRU;
5932 }
5933 }
5934
5935 /* Expanded RTL can create a jump in the last instruction of the block.
5936 This might later be assumed to be a jump to the successor and break edge insertion.
5937 We need to insert a dummy move to prevent this. PR41440. */
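  /* The dummy emitted below is just a no-op register self-copy, roughly
     (insn (set (reg:SI N) (reg:SI N))); with it, BB_END is no longer a
     jump (or a debug insn following one), so insns inserted on the
     fallthru edge later land inside BB.  */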
5938 if (single_succ_p (bb)
5939 && (single_succ_edge (bb)->flags & EDGE_FALLTHRU)
5940 && (last = get_last_insn ())
5941 && (JUMP_P (last)
5942 || (DEBUG_INSN_P (last)
5943 && JUMP_P (prev_nondebug_insn (last)))))
5944 {
5945 rtx dummy = gen_reg_rtx (SImode);
5946 emit_insn_after_noloc (gen_move_insn (dummy, dummy), last, NULL);
5947 }
5948
5949 do_pending_stack_adjust ();
5950
5951 /* Find the block tail. The last insn in the block is the insn
5952 before a barrier and/or table jump insn. */
5953 last = get_last_insn ();
5954 if (BARRIER_P (last))
5955 last = PREV_INSN (last);
5956 if (JUMP_TABLE_DATA_P (last))
5957 last = PREV_INSN (PREV_INSN (last));
5958 if (BARRIER_P (last))
5959 last = PREV_INSN (last);
5960 BB_END (bb) = last;
5961
5962 update_bb_for_insn (bb);
5963
5964 return bb;
5965 }
5966
5967
5968 /* Create a basic block for initialization code. */
5969
5970 static basic_block
5971 construct_init_block (void)
5972 {
5973 basic_block init_block, first_block;
5974 edge e = NULL;
5975 int flags;
5976
5977 /* Multiple entry points not supported yet. */
5978 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1);
5979 init_rtl_bb_info (ENTRY_BLOCK_PTR_FOR_FN (cfun));
5980 init_rtl_bb_info (EXIT_BLOCK_PTR_FOR_FN (cfun));
5981 ENTRY_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_RTL;
5982 EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_RTL;
5983
5984 e = EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0);
5985
5986 /* When the entry edge points to the first basic block, we don't need a jump;
5987 otherwise we have to jump to the proper target. */
5988 if (e && e->dest != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
5989 {
5990 tree label = gimple_block_label (e->dest);
5991
5992 emit_jump (jump_target_rtx (label));
5993 flags = 0;
5994 }
5995 else
5996 flags = EDGE_FALLTHRU;
5997
5998 init_block = create_basic_block (NEXT_INSN (get_insns ()),
5999 get_last_insn (),
6000 ENTRY_BLOCK_PTR_FOR_FN (cfun));
6001 init_block->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
6002 add_bb_to_loop (init_block, ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father);
6003 if (e)
6004 {
6005 first_block = e->dest;
6006 redirect_edge_succ (e, init_block);
6007 make_single_succ_edge (init_block, first_block, flags);
6008 }
6009 else
6010 make_single_succ_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
6011 EDGE_FALLTHRU);
6012
6013 update_bb_for_insn (init_block);
6014 return init_block;
6015 }
6016
6017 /* For each lexical block, set BLOCK_NUMBER to the depth at which it is
6018 found in the block tree. */
6019
6020 static void
6021 set_block_levels (tree block, int level)
6022 {
6023 while (block)
6024 {
6025 BLOCK_NUMBER (block) = level;
6026 set_block_levels (BLOCK_SUBBLOCKS (block), level + 1);
6027 block = BLOCK_CHAIN (block);
6028 }
6029 }
6030
6031 /* Create a block containing landing pads and similar stuff. */
6032
6033 static void
6034 construct_exit_block (void)
6035 {
6036 rtx_insn *head = get_last_insn ();
6037 rtx_insn *end;
6038 basic_block exit_block;
6039 edge e, e2;
6040 unsigned ix;
6041 edge_iterator ei;
6042 basic_block prev_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
6043 rtx_insn *orig_end = BB_END (prev_bb);
6044
6045 rtl_profile_for_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
6046
6047 /* Make sure the locus is set to the end of the function, so that
6048 epilogue line numbers and warnings are set properly. */
6049 if (LOCATION_LOCUS (cfun->function_end_locus) != UNKNOWN_LOCATION)
6050 input_location = cfun->function_end_locus;
6051
6052 /* Generate rtl for function exit. */
6053 expand_function_end ();
6054
6055 end = get_last_insn ();
6056 if (head == end)
6057 return;
6058 /* While emitting the function end we could move the end of the last basic
6059 block. */
6060 BB_END (prev_bb) = orig_end;
6061 while (NEXT_INSN (head) && NOTE_P (NEXT_INSN (head)))
6062 head = NEXT_INSN (head);
6063 /* But make sure exit_block starts with RETURN_LABEL, otherwise the
6064 bb count accounting will be confused. Any instructions before that
6065 label are emitted for the case where PREV_BB falls through into the
6066 exit block, so append those instructions to prev_bb in that case. */
6067 if (NEXT_INSN (head) != return_label)
6068 {
6069 while (NEXT_INSN (head) != return_label)
6070 {
6071 if (!NOTE_P (NEXT_INSN (head)))
6072 BB_END (prev_bb) = NEXT_INSN (head);
6073 head = NEXT_INSN (head);
6074 }
6075 }
6076 exit_block = create_basic_block (NEXT_INSN (head), end, prev_bb);
6077 exit_block->count = EXIT_BLOCK_PTR_FOR_FN (cfun)->count;
6078 add_bb_to_loop (exit_block, EXIT_BLOCK_PTR_FOR_FN (cfun)->loop_father);
6079
6080 ix = 0;
6081 while (ix < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
6082 {
6083 e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), ix);
6084 if (!(e->flags & EDGE_ABNORMAL))
6085 redirect_edge_succ (e, exit_block);
6086 else
6087 ix++;
6088 }
6089
6090 e = make_single_succ_edge (exit_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
6091 EDGE_FALLTHRU);
6092 FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
6093 if (e2 != e)
6094 {
6095 exit_block->count -= e2->count ();
6096 }
6097 update_bb_for_insn (exit_block);
6098 }
6099
6100 /* Helper function for discover_nonconstant_array_refs.
6101 Look for ARRAY_REF nodes with non-constant indexes and mark them
6102 addressable. */
6103
6104 static tree
6105 discover_nonconstant_array_refs_r (tree * tp, int *walk_subtrees,
6106 void *data ATTRIBUTE_UNUSED)
6107 {
6108 tree t = *tp;
6109
6110 if (IS_TYPE_OR_DECL_P (t))
6111 *walk_subtrees = 0;
6112 else if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
6113 {
6114 while (((TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
6115 && is_gimple_min_invariant (TREE_OPERAND (t, 1))
6116 && (!TREE_OPERAND (t, 2)
6117 || is_gimple_min_invariant (TREE_OPERAND (t, 2))))
6118 || (TREE_CODE (t) == COMPONENT_REF
6119 && (!TREE_OPERAND (t,2)
6120 || is_gimple_min_invariant (TREE_OPERAND (t, 2))))
6121 || TREE_CODE (t) == BIT_FIELD_REF
6122 || TREE_CODE (t) == REALPART_EXPR
6123 || TREE_CODE (t) == IMAGPART_EXPR
6124 || TREE_CODE (t) == VIEW_CONVERT_EXPR
6125 || CONVERT_EXPR_P (t))
6126 t = TREE_OPERAND (t, 0);
6127
6128 if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
6129 {
6130 t = get_base_address (t);
6131 if (t && DECL_P (t)
6132 && DECL_MODE (t) != BLKmode)
6133 TREE_ADDRESSABLE (t) = 1;
6134 }
6135
6136 *walk_subtrees = 0;
6137 }
6138
6139 return NULL_TREE;
6140 }
6141
6142 /* RTL expansion is not able to compile array references with variable
6143 offsets for arrays stored in a single register. Discover such
6144 expressions and mark variables as addressable to avoid this
6145 scenario. */
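
/* An invented example of what gets marked (not from any testcase):

     int a[2];
     ...
     x_3 = a[i_2];	<-- non-constant index i_2

   The ARRAY_REF with the variable index survives the stripping loop below,
   so its base A is marked TREE_ADDRESSABLE (provided A's DECL_MODE is not
   BLKmode) and therefore gets a stack slot instead of a single register.  */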
6146
6147 static void
6148 discover_nonconstant_array_refs (void)
6149 {
6150 basic_block bb;
6151 gimple_stmt_iterator gsi;
6152
6153 FOR_EACH_BB_FN (bb, cfun)
6154 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6155 {
6156 gimple *stmt = gsi_stmt (gsi);
6157 if (!is_gimple_debug (stmt))
6158 {
6159 walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
6160 gcall *call = dyn_cast <gcall *> (stmt);
6161 if (call && gimple_call_internal_p (call))
6162 switch (gimple_call_internal_fn (call))
6163 {
6164 case IFN_LOAD_LANES:
6165 /* The source must be a MEM. */
6166 mark_addressable (gimple_call_arg (call, 0));
6167 break;
6168 case IFN_STORE_LANES:
6169 /* The destination must be a MEM. */
6170 mark_addressable (gimple_call_lhs (call));
6171 break;
6172 default:
6173 break;
6174 }
6175 }
6176 }
6177 }
6178
6179 /* This function sets crtl->args.internal_arg_pointer to a virtual
6180 register if DRAP is needed. The local register allocator will replace
6181 virtual_incoming_args_rtx with the virtual register. */
6182
6183 static void
6184 expand_stack_alignment (void)
6185 {
6186 rtx drap_rtx;
6187 unsigned int preferred_stack_boundary;
6188
6189 if (! SUPPORTS_STACK_ALIGNMENT)
6190 return;
6191
6192 if (cfun->calls_alloca
6193 || cfun->has_nonlocal_label
6194 || crtl->has_nonlocal_goto)
6195 crtl->need_drap = true;
6196
6197 /* Call update_stack_boundary here again to update incoming stack
6198 boundary. It may set incoming stack alignment to a different
6199 value after RTL expansion. TARGET_FUNCTION_OK_FOR_SIBCALL may
6200 use the minimum incoming stack alignment to check if it is OK
6201 to perform sibcall optimization since sibcall optimization will
6202 only align the outgoing stack to incoming stack boundary. */
6203 if (targetm.calls.update_stack_boundary)
6204 targetm.calls.update_stack_boundary ();
6205
6206 /* The incoming stack frame has to be aligned at least at
6207 parm_stack_boundary. */
6208 gcc_assert (crtl->parm_stack_boundary <= INCOMING_STACK_BOUNDARY);
6209
6210 /* Update crtl->stack_alignment_estimated and use it later to align
6211 stack. We check PREFERRED_STACK_BOUNDARY if there may be non-call
6212 exceptions since callgraph doesn't collect incoming stack alignment
6213 in this case. */
6214 if (cfun->can_throw_non_call_exceptions
6215 && PREFERRED_STACK_BOUNDARY > crtl->preferred_stack_boundary)
6216 preferred_stack_boundary = PREFERRED_STACK_BOUNDARY;
6217 else
6218 preferred_stack_boundary = crtl->preferred_stack_boundary;
6219 if (preferred_stack_boundary > crtl->stack_alignment_estimated)
6220 crtl->stack_alignment_estimated = preferred_stack_boundary;
6221 if (preferred_stack_boundary > crtl->stack_alignment_needed)
6222 crtl->stack_alignment_needed = preferred_stack_boundary;
6223
6224 gcc_assert (crtl->stack_alignment_needed
6225 <= crtl->stack_alignment_estimated);
6226
6227 crtl->stack_realign_needed
6228 = INCOMING_STACK_BOUNDARY < crtl->stack_alignment_estimated;
6229 crtl->stack_realign_tried = crtl->stack_realign_needed;
6230
6231 crtl->stack_realign_processed = true;
6232
6233 /* Target has to redefine TARGET_GET_DRAP_RTX to support stack
6234 alignment. */
6235 gcc_assert (targetm.calls.get_drap_rtx != NULL);
6236 drap_rtx = targetm.calls.get_drap_rtx ();
6237
6238 /* stack_realign_drap and drap_rtx must match. */
6239 gcc_assert ((stack_realign_drap != 0) == (drap_rtx != NULL));
6240
6241 /* Do nothing if NULL is returned, which means DRAP is not needed. */
6242 if (drap_rtx != NULL)
6243 {
6244 crtl->args.internal_arg_pointer = drap_rtx;
6245
6246 /* Call fixup_tail_calls to clean up REG_EQUIV note if DRAP is
6247 needed. */
6248 fixup_tail_calls ();
6249 }
6250 }
6251 \f
6252
6253 static void
6254 expand_main_function (void)
6255 {
6256 #if (defined(INVOKE__main) \
6257 || (!defined(HAS_INIT_SECTION) \
6258 && !defined(INIT_SECTION_ASM_OP) \
6259 && !defined(INIT_ARRAY_SECTION_ASM_OP)))
6260 emit_library_call (init_one_libfunc (NAME__MAIN), LCT_NORMAL, VOIDmode);
6261 #endif
6262 }
6263 \f
6264
6265 /* Expand code to initialize the stack_protect_guard. This is invoked at
6266 the beginning of a function to be protected. */
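
/* Conceptually (a sketch only; the actual insn patterns are
   target-specific) the code emitted here amounts to

     guard_slot = <guard value>;	e.g. __stack_chk_guard on glibc targets

   where GUARD_SLOT is crtl->stack_protect_guard, the canary slot in the
   frame, and the guard value comes from targetm.stack_protect_guard ().
   The stack_protect_combined_set / stack_protect_set patterns merely let
   the target perform that copy without exposing the guard value in a
   spillable intermediate register.  */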
6267
6268 static void
6269 stack_protect_prologue (void)
6270 {
6271 tree guard_decl = targetm.stack_protect_guard ();
6272 rtx x, y;
6273
6274 crtl->stack_protect_guard_decl = guard_decl;
6275 x = expand_normal (crtl->stack_protect_guard);
6276
6277 if (targetm.have_stack_protect_combined_set () && guard_decl)
6278 {
6279 gcc_assert (DECL_P (guard_decl));
6280 y = DECL_RTL (guard_decl);
6281
6282 /* Allow the target to compute the address of Y and copy it to X without
6283 leaking Y into a register. This combined address + copy pattern
6284 allows the target to prevent spilling of any intermediate results by
6285 splitting it after register allocation. */
6286 if (rtx_insn *insn = targetm.gen_stack_protect_combined_set (x, y))
6287 {
6288 emit_insn (insn);
6289 return;
6290 }
6291 }
6292
6293 if (guard_decl)
6294 y = expand_normal (guard_decl);
6295 else
6296 y = const0_rtx;
6297
6298 /* Allow the target to copy from Y to X without leaking Y into a
6299 register. */
6300 if (targetm.have_stack_protect_set ())
6301 if (rtx_insn *insn = targetm.gen_stack_protect_set (x, y))
6302 {
6303 emit_insn (insn);
6304 return;
6305 }
6306
6307 /* Otherwise do a straight move. */
6308 emit_move_insn (x, y);
6309 }
6310
6311 /* Translate the intermediate representation contained in the CFG
6312 from GIMPLE trees to RTL.
6313
6314 We do conversion per basic block and preserve/update the tree CFG.
6315 This implies we have to do some magic as the CFG can simultaneously
6316 consist of basic blocks containing RTL and GIMPLE trees. This can
6317 confuse the CFG hooks, so be careful not to manipulate the CFG during
6318 the expansion. */
6319
6320 namespace {
6321
6322 const pass_data pass_data_expand =
6323 {
6324 RTL_PASS, /* type */
6325 "expand", /* name */
6326 OPTGROUP_NONE, /* optinfo_flags */
6327 TV_EXPAND, /* tv_id */
6328 ( PROP_ssa | PROP_gimple_leh | PROP_cfg
6329 | PROP_gimple_lcx
6330 | PROP_gimple_lvec
6331 | PROP_gimple_lva), /* properties_required */
6332 PROP_rtl, /* properties_provided */
6333 ( PROP_ssa | PROP_trees ), /* properties_destroyed */
6334 0, /* todo_flags_start */
6335 0, /* todo_flags_finish */
6336 };
6337
6338 class pass_expand : public rtl_opt_pass
6339 {
6340 public:
6341 pass_expand (gcc::context *ctxt)
6342 : rtl_opt_pass (pass_data_expand, ctxt)
6343 {}
6344
6345 /* opt_pass methods: */
6346 virtual unsigned int execute (function *);
6347
6348 }; // class pass_expand
6349
6350 unsigned int
6351 pass_expand::execute (function *fun)
6352 {
6353 basic_block bb, init_block;
6354 edge_iterator ei;
6355 edge e;
6356 rtx_insn *var_seq, *var_ret_seq;
6357 unsigned i;
6358
6359 timevar_push (TV_OUT_OF_SSA);
6360 rewrite_out_of_ssa (&SA);
6361 timevar_pop (TV_OUT_OF_SSA);
6362 SA.partition_to_pseudo = XCNEWVEC (rtx, SA.map->num_partitions);
6363
6364 if (MAY_HAVE_DEBUG_BIND_STMTS && flag_tree_ter)
6365 {
6366 gimple_stmt_iterator gsi;
6367 FOR_EACH_BB_FN (bb, cfun)
6368 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6369 if (gimple_debug_bind_p (gsi_stmt (gsi)))
6370 avoid_deep_ter_for_debug (gsi_stmt (gsi), 0);
6371 }
6372
6373 /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE. */
6374 discover_nonconstant_array_refs ();
6375
6376 /* Make sure all values used by the optimization passes have sane
6377 defaults. */
6378 reg_renumber = 0;
6379
6380 /* Some backends want to know that we are expanding to RTL. */
6381 currently_expanding_to_rtl = 1;
6382 /* Dominators are not kept up-to-date as we may create new basic-blocks. */
6383 free_dominance_info (CDI_DOMINATORS);
6384
6385 rtl_profile_for_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
6386
6387 insn_locations_init ();
6388 if (!DECL_IS_BUILTIN (current_function_decl))
6389 {
6390 /* Eventually, all FEs should explicitly set function_start_locus. */
6391 if (LOCATION_LOCUS (fun->function_start_locus) == UNKNOWN_LOCATION)
6392 set_curr_insn_location
6393 (DECL_SOURCE_LOCATION (current_function_decl));
6394 else
6395 set_curr_insn_location (fun->function_start_locus);
6396 }
6397 else
6398 set_curr_insn_location (UNKNOWN_LOCATION);
6399 prologue_location = curr_insn_location ();
6400
6401 #ifdef INSN_SCHEDULING
6402 init_sched_attrs ();
6403 #endif
6404
6405 /* Make sure the first insn is a note even if we don't want line numbers.
6406 This makes sure the first insn will never be deleted.
6407 Also, final expects a note to appear there. */
6408 emit_note (NOTE_INSN_DELETED);
6409
6410 targetm.expand_to_rtl_hook ();
6411 crtl->init_stack_alignment ();
6412 fun->cfg->max_jumptable_ents = 0;
6413
6414 /* Resolve the function section. Some targets, like ARM EABI, rely on knowledge
6415 of the function section at expansion time to predict the distance of calls. */
6416 resolve_unique_section (current_function_decl, 0, flag_function_sections);
6417
6418 /* Expand the variables recorded during gimple lowering. */
6419 timevar_push (TV_VAR_EXPAND);
6420 start_sequence ();
6421
6422 var_ret_seq = expand_used_vars ();
6423
6424 var_seq = get_insns ();
6425 end_sequence ();
6426 timevar_pop (TV_VAR_EXPAND);
6427
6428 /* Honor stack protection warnings. */
6429 if (warn_stack_protect)
6430 {
6431 if (fun->calls_alloca)
6432 warning (OPT_Wstack_protector,
6433 "stack protector not protecting local variables: "
6434 "variable length buffer");
6435 if (has_short_buffer && !crtl->stack_protect_guard)
6436 warning (OPT_Wstack_protector,
6437 "stack protector not protecting function: "
6438 "all local arrays are less than %d bytes long",
6439 (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
6440 }
6441
6442 /* Set up parameters and prepare for return, for the function. */
6443 expand_function_start (current_function_decl);
6444
6445 /* If we emitted any instructions for setting up the variables,
6446 emit them before the FUNCTION_START note. */
6447 if (var_seq)
6448 {
6449 emit_insn_before (var_seq, parm_birth_insn);
6450
6451 /* In expand_function_end we'll insert the alloca save/restore
6452 before parm_birth_insn. We've just inserted an alloca call.
6453 Adjust the pointer to match. */
6454 parm_birth_insn = var_seq;
6455 }
6456
6457 /* Now propagate the RTL assignment of each partition to the
6458 underlying var of each SSA_NAME. */
6459 tree name;
6460
6461 FOR_EACH_SSA_NAME (i, name, cfun)
6462 {
6463 /* We might have generated new SSA names in
6464 update_alias_info_with_stack_vars. They will have a NULL
6465 defining statement, and won't be part of the partitioning,
6466 so ignore those. */
6467 if (!SSA_NAME_DEF_STMT (name))
6468 continue;
6469
6470 adjust_one_expanded_partition_var (name);
6471 }
6472
6473 /* Clean up RTL of variables that straddle across multiple
6474 partitions, and check that the rtl of any PARM_DECLs that are not
6475 cleaned up is that of their default defs. */
6476 FOR_EACH_SSA_NAME (i, name, cfun)
6477 {
6478 int part;
6479
6480 /* We might have generated new SSA names in
6481 update_alias_info_with_stack_vars. They will have a NULL
6482 defining statement, and won't be part of the partitioning,
6483 so ignore those. */
6484 if (!SSA_NAME_DEF_STMT (name))
6485 continue;
6486 part = var_to_partition (SA.map, name);
6487 if (part == NO_PARTITION)
6488 continue;
6489
6490 /* If this decl was marked as living in multiple places, reset
6491 its RTL now to NULL. */
6492 tree var = SSA_NAME_VAR (name);
6493 if (var && DECL_RTL_IF_SET (var) == pc_rtx)
6494 SET_DECL_RTL (var, NULL);
6495 /* Check that the pseudos chosen by assign_parms are those of
6496 the corresponding default defs. */
6497 else if (SSA_NAME_IS_DEFAULT_DEF (name)
6498 && (TREE_CODE (var) == PARM_DECL
6499 || TREE_CODE (var) == RESULT_DECL))
6500 {
6501 rtx in = DECL_RTL_IF_SET (var);
6502 gcc_assert (in);
6503 rtx out = SA.partition_to_pseudo[part];
6504 gcc_assert (in == out);
6505
6506 /* Now reset VAR's RTL to IN, so that the _EXPR attrs match
6507 those expected by debug backends for each parm and for
6508 the result. This is particularly important for stabs,
6509 whose register elimination from parm's DECL_RTL may cause
6510 -fcompare-debug differences as SET_DECL_RTL changes reg's
6511 attrs. So, make sure the RTL already has the parm as the
6512 EXPR, so that it won't change. */
6513 SET_DECL_RTL (var, NULL_RTX);
6514 if (MEM_P (in))
6515 set_mem_attributes (in, var, true);
6516 SET_DECL_RTL (var, in);
6517 }
6518 }
6519
6520 /* If this function is `main', emit a call to `__main'
6521 to run global initializers, etc. */
6522 if (DECL_NAME (current_function_decl)
6523 && MAIN_NAME_P (DECL_NAME (current_function_decl))
6524 && DECL_FILE_SCOPE_P (current_function_decl))
6525 expand_main_function ();
6526
6527 /* Initialize the stack_protect_guard field. This must happen after the
6528 call to __main (if any) so that the external decl is initialized. */
6529 if (crtl->stack_protect_guard && targetm.stack_protect_runtime_enabled_p ())
6530 stack_protect_prologue ();
6531
6532 expand_phi_nodes (&SA);
6533
6534 /* Release any stale SSA redirection data. */
6535 redirect_edge_var_map_empty ();
6536
6537 /* Register rtl specific functions for cfg. */
6538 rtl_register_cfg_hooks ();
6539
6540 init_block = construct_init_block ();
6541
6542 /* Clear EDGE_EXECUTABLE on the entry edge(s). It is cleaned from the
6543 remaining edges later. */
6544 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fun)->succs)
6545 e->flags &= ~EDGE_EXECUTABLE;
6546
6547 /* If the function has too many markers, drop them while expanding. */
6548 if (cfun->debug_marker_count
6549 >= PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
6550 cfun->debug_nonbind_markers = false;
6551
6552 lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
6553 FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR_FOR_FN (fun),
6554 next_bb)
6555 bb = expand_gimple_basic_block (bb, var_ret_seq != NULL_RTX);
6556
6557 if (MAY_HAVE_DEBUG_BIND_INSNS)
6558 expand_debug_locations ();
6559
6560 if (deep_ter_debug_map)
6561 {
6562 delete deep_ter_debug_map;
6563 deep_ter_debug_map = NULL;
6564 }
6565
6566 /* Free stuff we no longer need after GIMPLE optimizations. */
6567 free_dominance_info (CDI_DOMINATORS);
6568 free_dominance_info (CDI_POST_DOMINATORS);
6569 delete_tree_cfg_annotations (fun);
6570
6571 timevar_push (TV_OUT_OF_SSA);
6572 finish_out_of_ssa (&SA);
6573 timevar_pop (TV_OUT_OF_SSA);
6574
6575 timevar_push (TV_POST_EXPAND);
6576 /* We are no longer in SSA form. */
6577 fun->gimple_df->in_ssa_p = false;
6578 loops_state_clear (LOOP_CLOSED_SSA);
6579
6580 /* Expansion is used by optimization passes too, set maybe_hot_insn_p
6581 conservatively to true until they are all profile aware. */
6582 delete lab_rtx_for_bb;
6583 free_histograms (fun);
6584
6585 construct_exit_block ();
6586 insn_locations_finalize ();
6587
6588 if (var_ret_seq)
6589 {
6590 rtx_insn *after = return_label;
6591 rtx_insn *next = NEXT_INSN (after);
6592 if (next && NOTE_INSN_BASIC_BLOCK_P (next))
6593 after = next;
6594 emit_insn_after (var_ret_seq, after);
6595 }
6596
6597 /* Zap the tree EH table. */
6598 set_eh_throw_stmt_table (fun, NULL);
6599
6600 /* We need JUMP_LABEL to be set in order to redirect jumps, and hence
6601 to split edges, which edge insertions might do. */
6602 rebuild_jump_labels (get_insns ());
6603
6604 /* If we have a single successor to the entry block, put the pending insns
6605 after parm birth, but before NOTE_INSN_FUNCTION_BEG. */
6606 if (single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (fun)))
6607 {
6608 edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (fun));
6609 if (e->insns.r)
6610 {
6611 rtx_insn *insns = e->insns.r;
6612 e->insns.r = NULL;
6613 rebuild_jump_labels_chain (insns);
6614 if (NOTE_P (parm_birth_insn)
6615 && NOTE_KIND (parm_birth_insn) == NOTE_INSN_FUNCTION_BEG)
6616 emit_insn_before_noloc (insns, parm_birth_insn, e->dest);
6617 else
6618 emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
6619 }
6620 }
6621
6622 /* Otherwise, and for all other edges, commit the pending insertions the usual way. */
6623 commit_edge_insertions ();
6624
6625 /* We're done expanding trees to RTL. */
6626 currently_expanding_to_rtl = 0;
6627
6628 flush_mark_addressable_queue ();
6629
6630 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (fun)->next_bb,
6631 EXIT_BLOCK_PTR_FOR_FN (fun), next_bb)
6632 {
6633 edge e;
6634 edge_iterator ei;
6635 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
6636 {
6637 /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */
6638 e->flags &= ~EDGE_EXECUTABLE;
6639
6640 /* At the moment not all abnormal edges match the RTL
6641 representation. It is safe to remove them here as
6642 find_many_sub_basic_blocks will rediscover them.
6643 In the future we should get this fixed properly. */
6644 if ((e->flags & EDGE_ABNORMAL)
6645 && !(e->flags & EDGE_SIBCALL))
6646 remove_edge (e);
6647 else
6648 ei_next (&ei);
6649 }
6650 }
6651
6652 auto_sbitmap blocks (last_basic_block_for_fn (fun));
6653 bitmap_ones (blocks);
6654 find_many_sub_basic_blocks (blocks);
6655 purge_all_dead_edges ();
6656
6657 /* After initial rtl generation, call back to finish generating
6658 exception support code. We need to do this before cleaning up
6659 the CFG as the code does not expect dead landing pads. */
6660 if (fun->eh->region_tree != NULL)
6661 finish_eh_generation ();
6662
6663 /* Call expand_stack_alignment after finishing all
6664 updates to crtl->preferred_stack_boundary. */
6665 expand_stack_alignment ();
6666
6667 /* Fixup REG_EQUIV notes in the prologue if there are tailcalls in this
6668 function. */
6669 if (crtl->tail_call_emit)
6670 fixup_tail_calls ();
6671
6672 /* BB subdivision may have created basic blocks that are only reachable
6673 from unlikely bbs but not marked as such in the profile. */
6674 if (optimize)
6675 propagate_unlikely_bbs_forward ();
6676
6677 /* Remove unreachable blocks, otherwise we cannot compute dominators
6678 which are needed for loop state verification. As a side-effect
6679 this also compacts blocks.
6680 ??? We cannot remove trivially dead insns here as for example
6681 the DRAP reg on i?86 is not magically live at this point.
6682 gcc.c-torture/execute/ipa-sra-2.c execution, -Os -m32 fails otherwise. */
6683 cleanup_cfg (CLEANUP_NO_INSN_DEL);
6684
6685 checking_verify_flow_info ();
6686
6687 /* Initialize pseudos allocated for hard registers. */
6688 emit_initial_value_sets ();
6689
6690 /* And finally unshare all RTL. */
6691 unshare_all_rtl ();
6692
6693 /* There's no need to defer outputting this function any more; we
6694 know we want to output it. */
6695 DECL_DEFER_OUTPUT (current_function_decl) = 0;
6696
6697 /* Now that we're done expanding trees to RTL, we shouldn't have any
6698 more CONCATs anywhere. */
6699 generating_concat_p = 0;
6700
6701 if (dump_file)
6702 {
6703 fprintf (dump_file,
6704 "\n\n;;\n;; Full RTL generated for this function:\n;;\n");
6705 /* And the pass manager will dump RTL for us. */
6706 }
6707
6708 /* If we're emitting a nested function, make sure its parent gets
6709 emitted as well. Doing otherwise confuses debug info. */
6710 {
6711 tree parent;
6712 for (parent = DECL_CONTEXT (current_function_decl);
6713 parent != NULL_TREE;
6714 parent = get_containing_scope (parent))
6715 if (TREE_CODE (parent) == FUNCTION_DECL)
6716 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (parent)) = 1;
6717 }
6718
6719 TREE_ASM_WRITTEN (current_function_decl) = 1;
6720
6721 /* After expanding, the return labels are no longer needed. */
6722 return_label = NULL;
6723 naked_return_label = NULL;
6724
6725 /* After expanding, the tm_restart map is no longer needed. */
6726 if (fun->gimple_df->tm_restart)
6727 fun->gimple_df->tm_restart = NULL;
6728
6729 /* Tag the blocks with a depth number so that change_scope can find
6730 the common parent easily. */
6731 set_block_levels (DECL_INITIAL (fun->decl), 0);
6732 default_rtl_profile ();
6733
6734 /* For -dx discard loops now, otherwise IL verify in clean_state will
6735 ICE. */
6736 if (rtl_dump_and_exit)
6737 {
6738 cfun->curr_properties &= ~PROP_loops;
6739 loop_optimizer_finalize ();
6740 }
6741
6742 timevar_pop (TV_POST_EXPAND);
6743
6744 return 0;
6745 }
6746
6747 } // anon namespace
6748
6749 rtl_opt_pass *
6750 make_pass_expand (gcc::context *ctxt)
6751 {
6752 return new pass_expand (ctxt);
6753 }