1 /* Generic SSA value propagation engine.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "ssa.h"
28 #include "gimple-pretty-print.h"
29 #include "dumpfile.h"
30 #include "gimple-fold.h"
31 #include "tree-eh.h"
32 #include "gimplify.h"
33 #include "gimple-iterator.h"
34 #include "tree-cfg.h"
35 #include "tree-ssa.h"
36 #include "tree-ssa-propagate.h"
37 #include "domwalk.h"
38 #include "cfgloop.h"
39 #include "tree-cfgcleanup.h"
40 #include "cfganal.h"
41
42 /* This file implements a generic value propagation engine based on
43 the same propagation used by the SSA-CCP algorithm [1].
44
45 Propagation is performed by simulating the execution of every
46 statement that produces the value being propagated. Simulation
47 proceeds as follows:
48
49      1- Initially, all edges of the CFG are marked not executable and
50         the CFG worklist is seeded with the destinations of the edges
51         going out of the entry basic block.
52
53 2- Every statement S is simulated with a call to the call-back
54 function SSA_PROP_VISIT_STMT. This evaluation may produce 3
55 results:
56
57 SSA_PROP_NOT_INTERESTING: Statement S produces nothing of
58 interest and does not affect any of the work lists.
59 The statement may be simulated again if any of its input
60 operands change in future iterations of the simulator.
61
62 SSA_PROP_VARYING: The value produced by S cannot be determined
63 at compile time. Further simulation of S is not required.
64 If S is a conditional jump, all the outgoing edges for the
65 block are considered executable and added to the work
66 list.
67
68 SSA_PROP_INTERESTING: S produces a value that can be computed
69 at compile time. Its result can be propagated into the
70 statements that feed from S. Furthermore, if S is a
71 conditional jump, only the edge known to be taken is added
72 to the work list. Edges that are known not to execute are
73 never simulated.
74
75 3- PHI nodes are simulated with a call to SSA_PROP_VISIT_PHI. The
76 return value from SSA_PROP_VISIT_PHI has the same semantics as
77 described in #2.
78
79 4- Three work lists are kept. Statements are only added to these
80 lists if they produce one of SSA_PROP_INTERESTING or
81 SSA_PROP_VARYING.
82
83 CFG_BLOCKS contains the list of blocks to be simulated.
84 Blocks are added to this list if their incoming edges are
85 found executable.
86
87 SSA_EDGE_WORKLIST contains the list of statements that we
88 need to revisit.
89
90      5- Simulation terminates when both work lists are drained.
91
92      Before calling ssa_propagate, it is important to set
93      prop_simulate_again_p appropriately for every statement in the
94      program: statements for which it is left cleared will never be
95      simulated by the engine.
96
97 It is also important to compute def-use information before calling
98 ssa_propagate.
99
100 References:
101
102 [1] Constant propagation with conditional branches,
103 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
104
105 [2] Building an Optimizing Compiler,
106 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
107
108 [3] Advanced Compiler Design and Implementation,
109 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
110
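/* As a purely illustrative sketch (not part of GCC; the class name and
   the policy it implements are made up), a client of this engine derives
   from ssa_propagation_engine and implements the two callbacks described
   above, for example:

     class copy_prop_example : public ssa_propagation_engine
     {
     public:
       enum ssa_prop_result visit_stmt (gimple *stmt, edge *taken_edge_p,
                                        tree *output_p) final override
       {
         *taken_edge_p = NULL;
         if (gimple_assign_ssa_name_copy_p (stmt))
           {
             *output_p = gimple_assign_lhs (stmt);
             return SSA_PROP_INTERESTING;
           }
         return SSA_PROP_VARYING;
       }
       enum ssa_prop_result visit_phi (gphi *) final override
       {
         return SSA_PROP_VARYING;
       }
     };

   After initializing prop_simulate_again_p on every statement, the pass
   would simply call copy_prop_example ().ssa_propagate ().  */
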
111 /* Worklists of control flow edge destinations.  These contain
112    the CFG order numbers of the blocks so we can iterate in CFG
113 order by visiting in bit-order. We use two worklists to
114 first make forward progress before iterating. */
115 static bitmap cfg_blocks;
116 static bitmap cfg_blocks_back;
117 static int *bb_to_cfg_order;
118 static int *cfg_order_to_bb;
119
120 /* Worklists of SSA edges which will need reexamination as their
121 definition has changed. SSA edges are def-use edges in the SSA
122 web. For each D-U edge, we store the target statement or PHI node
123 UID in a bitmap. UIDs order stmts in execution order. We use
124 two worklists to first make forward progress before iterating. */
125 static bitmap ssa_edge_worklist;
126 static bitmap ssa_edge_worklist_back;
127 static vec<gimple *> uid_to_stmt;
128
129 /* Current RPO index in the iteration. */
130 static int curr_order;
131
132
133 /* We have just defined a new value for VAR.  Add all immediate uses
134    of VAR that should be simulated again to the SSA edge worklist
135    (ssa_edge_worklist or ssa_edge_worklist_back, depending on RPO order).  */
136
137 static void
138 add_ssa_edge (tree var)
139 {
140 imm_use_iterator iter;
141 use_operand_p use_p;
142
143 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
144 {
145 gimple *use_stmt = USE_STMT (use_p);
146 if (!prop_simulate_again_p (use_stmt))
147 continue;
148
149       /* If we have not yet simulated the block, wait for that to happen
150          and do not add the stmt to the SSA edge worklist.  */
151 basic_block use_bb = gimple_bb (use_stmt);
152 if (! (use_bb->flags & BB_VISITED))
153 continue;
154
155       /* If this is a use on a not yet executable edge, do not bother to
156 queue it. */
157 if (gimple_code (use_stmt) == GIMPLE_PHI
158 && !(EDGE_PRED (use_bb, PHI_ARG_INDEX_FROM_USE (use_p))->flags
159 & EDGE_EXECUTABLE))
160 continue;
161
162 bitmap worklist;
163 if (bb_to_cfg_order[gimple_bb (use_stmt)->index] < curr_order)
164 worklist = ssa_edge_worklist_back;
165 else
166 worklist = ssa_edge_worklist;
167 if (bitmap_set_bit (worklist, gimple_uid (use_stmt)))
168 {
169 uid_to_stmt[gimple_uid (use_stmt)] = use_stmt;
170 if (dump_file && (dump_flags & TDF_DETAILS))
171 {
172 fprintf (dump_file, "ssa_edge_worklist: adding SSA use in ");
173 print_gimple_stmt (dump_file, use_stmt, 0, TDF_SLIM);
174 }
175 }
176 }
177 }
178
179
180 /* Add edge E to the control flow worklist. */
181
182 static void
183 add_control_edge (edge e)
184 {
185 basic_block bb = e->dest;
186 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
187 return;
188
189   /* If the edge has already been marked executable, skip it.  */
190 if (e->flags & EDGE_EXECUTABLE)
191 return;
192
193 e->flags |= EDGE_EXECUTABLE;
194
195 int bb_order = bb_to_cfg_order[bb->index];
196 if (bb_order < curr_order)
197 bitmap_set_bit (cfg_blocks_back, bb_order);
198 else
199 bitmap_set_bit (cfg_blocks, bb_order);
200
201 if (dump_file && (dump_flags & TDF_DETAILS))
202 fprintf (dump_file, "Adding destination of edge (%d -> %d) to worklist\n",
203 e->src->index, e->dest->index);
204 }
205
206
207 /* Simulate the execution of STMT and update the work lists accordingly. */
208
209 void
210 ssa_propagation_engine::simulate_stmt (gimple *stmt)
211 {
212 enum ssa_prop_result val = SSA_PROP_NOT_INTERESTING;
213 edge taken_edge = NULL;
214 tree output_name = NULL_TREE;
215
216 /* Pull the stmt off the SSA edge worklist. */
217 bitmap_clear_bit (ssa_edge_worklist, gimple_uid (stmt));
218
219 /* Don't bother visiting statements that are already
220 considered varying by the propagator. */
221 if (!prop_simulate_again_p (stmt))
222 return;
223
224 if (gimple_code (stmt) == GIMPLE_PHI)
225 {
226 val = visit_phi (as_a <gphi *> (stmt));
227 output_name = gimple_phi_result (stmt);
228 }
229 else
230 val = visit_stmt (stmt, &taken_edge, &output_name);
231
232 if (val == SSA_PROP_VARYING)
233 {
234 prop_set_simulate_again (stmt, false);
235
236 /* If the statement produced a new varying value, add the SSA
237 edges coming out of OUTPUT_NAME. */
238 if (output_name)
239 add_ssa_edge (output_name);
240
241 /* If STMT transfers control out of its basic block, add
242 all outgoing edges to the work list. */
243 if (stmt_ends_bb_p (stmt))
244 {
245 edge e;
246 edge_iterator ei;
247 basic_block bb = gimple_bb (stmt);
248 FOR_EACH_EDGE (e, ei, bb->succs)
249 add_control_edge (e);
250 }
251 return;
252 }
253 else if (val == SSA_PROP_INTERESTING)
254 {
255 /* If the statement produced new value, add the SSA edges coming
256 out of OUTPUT_NAME. */
257 if (output_name)
258 add_ssa_edge (output_name);
259
260 /* If we know which edge is going to be taken out of this block,
261 add it to the CFG work list. */
262 if (taken_edge)
263 add_control_edge (taken_edge);
264 }
265
266 /* If there are no SSA uses on the stmt whose defs are simulated
267      again then this stmt will never be visited again.  */
268 bool has_simulate_again_uses = false;
269 use_operand_p use_p;
270 ssa_op_iter iter;
271 if (gimple_code (stmt) == GIMPLE_PHI)
272 {
273 edge_iterator ei;
274 edge e;
275 tree arg;
276 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
277 if (!(e->flags & EDGE_EXECUTABLE)
278 || ((arg = PHI_ARG_DEF_FROM_EDGE (stmt, e))
279 && TREE_CODE (arg) == SSA_NAME
280 && !SSA_NAME_IS_DEFAULT_DEF (arg)
281 && prop_simulate_again_p (SSA_NAME_DEF_STMT (arg))))
282 {
283 has_simulate_again_uses = true;
284 break;
285 }
286 }
287 else
288 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
289 {
290 gimple *def_stmt = SSA_NAME_DEF_STMT (USE_FROM_PTR (use_p));
291 if (!gimple_nop_p (def_stmt)
292 && prop_simulate_again_p (def_stmt))
293 {
294 has_simulate_again_uses = true;
295 break;
296 }
297 }
298 if (!has_simulate_again_uses)
299 {
300 if (dump_file && (dump_flags & TDF_DETAILS))
301 fprintf (dump_file, "marking stmt to be not simulated again\n");
302 prop_set_simulate_again (stmt, false);
303 }
304 }
305
306
307 /* Simulate the execution of BLOCK. Evaluate the statement associated
308 with each variable reference inside the block. */
309
310 void
311 ssa_propagation_engine::simulate_block (basic_block block)
312 {
313 gimple_stmt_iterator gsi;
314
315 /* There is nothing to do for the exit block. */
316 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
317 return;
318
319 if (dump_file && (dump_flags & TDF_DETAILS))
320 fprintf (dump_file, "\nSimulating block %d\n", block->index);
321
322 /* Always simulate PHI nodes, even if we have simulated this block
323 before. */
324 for (gsi = gsi_start_phis (block); !gsi_end_p (gsi); gsi_next (&gsi))
325 simulate_stmt (gsi_stmt (gsi));
326
327 /* If this is the first time we've simulated this block, then we
328 must simulate each of its statements. */
329 if (! (block->flags & BB_VISITED))
330 {
331 gimple_stmt_iterator j;
332 unsigned int normal_edge_count;
333 edge e, normal_edge;
334 edge_iterator ei;
335
336 for (j = gsi_start_bb (block); !gsi_end_p (j); gsi_next (&j))
337 simulate_stmt (gsi_stmt (j));
338
339 /* Note that we have simulated this block. */
340 block->flags |= BB_VISITED;
341
342 /* We cannot predict when abnormal and EH edges will be executed, so
343 once a block is considered executable, we consider any
344 outgoing abnormal edges as executable.
345
346          TODO: This is not exactly true.  Simplifying a statement might
347          prove it non-throwing, and a computed goto can be handled
348          when its destination is known.
349
350 At the same time, if this block has only one successor that is
351 reached by non-abnormal edges, then add that successor to the
352 worklist. */
353 normal_edge_count = 0;
354 normal_edge = NULL;
355 FOR_EACH_EDGE (e, ei, block->succs)
356 {
357 if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
358 add_control_edge (e);
359 else
360 {
361 normal_edge_count++;
362 normal_edge = e;
363 }
364 }
365
366 if (normal_edge_count == 1)
367 add_control_edge (normal_edge);
368 }
369 }
370
371
372 /* Initialize local data structures and work lists. */
373
374 static void
375 ssa_prop_init (void)
376 {
377 edge e;
378 edge_iterator ei;
379 basic_block bb;
380
381 /* Worklists of SSA edges. */
382 ssa_edge_worklist = BITMAP_ALLOC (NULL);
383 ssa_edge_worklist_back = BITMAP_ALLOC (NULL);
384 bitmap_tree_view (ssa_edge_worklist);
385 bitmap_tree_view (ssa_edge_worklist_back);
386
387 /* Worklist of basic-blocks. */
388 bb_to_cfg_order = XNEWVEC (int, last_basic_block_for_fn (cfun) + 1);
389 cfg_order_to_bb = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
390 int n = pre_and_rev_post_order_compute_fn (cfun, NULL,
391 cfg_order_to_bb, false);
392 for (int i = 0; i < n; ++i)
393 bb_to_cfg_order[cfg_order_to_bb[i]] = i;
394 cfg_blocks = BITMAP_ALLOC (NULL);
395 cfg_blocks_back = BITMAP_ALLOC (NULL);
396
397   /* Initially assume that every edge in the CFG is not executable
398      (including the edges coming out of the entry block).  Mark blocks
399      as not visited; blocks not yet visited will have all their statements
400      simulated once an incoming edge becomes executable.  */
401 set_gimple_stmt_max_uid (cfun, 0);
402 for (int i = 0; i < n; ++i)
403 {
404 gimple_stmt_iterator si;
405 bb = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb[i]);
406
407 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
408 {
409 gimple *stmt = gsi_stmt (si);
410 gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
411 }
412
413 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
414 {
415 gimple *stmt = gsi_stmt (si);
416 gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
417 }
418
419 bb->flags &= ~BB_VISITED;
420 FOR_EACH_EDGE (e, ei, bb->succs)
421 e->flags &= ~EDGE_EXECUTABLE;
422 }
423 uid_to_stmt.safe_grow (gimple_stmt_max_uid (cfun), true);
424 }
425
426
427 /* Free allocated storage. */
428
429 static void
430 ssa_prop_fini (void)
431 {
432 BITMAP_FREE (cfg_blocks);
433 BITMAP_FREE (cfg_blocks_back);
434 free (bb_to_cfg_order);
435 free (cfg_order_to_bb);
436 BITMAP_FREE (ssa_edge_worklist);
437 BITMAP_FREE (ssa_edge_worklist_back);
438 uid_to_stmt.release ();
439 }
440
441
442 /* Entry point to the propagation engine.
443
444 The VISIT_STMT virtual function is called for every statement
445 visited and the VISIT_PHI virtual function is called for every PHI
446 node visited. */
447
448 void
449 ssa_propagation_engine::ssa_propagate (void)
450 {
451 ssa_prop_init ();
452
453 curr_order = 0;
454
455 /* Iterate until the worklists are empty. We iterate both blocks
456 and stmts in RPO order, using sets of two worklists to first
457 complete the current iteration before iterating over backedges.
458 Seed the algorithm by adding the successors of the entry block to the
459 edge worklist. */
460 edge e;
461 edge_iterator ei;
462 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
463 {
464 e->flags &= ~EDGE_EXECUTABLE;
465 add_control_edge (e);
466 }
467 while (1)
468 {
469 int next_block_order = (bitmap_empty_p (cfg_blocks)
470 ? -1 : bitmap_first_set_bit (cfg_blocks));
471 int next_stmt_uid = (bitmap_empty_p (ssa_edge_worklist)
472 ? -1 : bitmap_first_set_bit (ssa_edge_worklist));
473 if (next_block_order == -1 && next_stmt_uid == -1)
474 {
475 if (bitmap_empty_p (cfg_blocks_back)
476 && bitmap_empty_p (ssa_edge_worklist_back))
477 break;
478
479 if (dump_file && (dump_flags & TDF_DETAILS))
480 fprintf (dump_file, "Regular worklists empty, now processing "
481 "backedge destinations\n");
482 std::swap (cfg_blocks, cfg_blocks_back);
483 std::swap (ssa_edge_worklist, ssa_edge_worklist_back);
484 continue;
485 }
486
487 int next_stmt_bb_order = -1;
488 gimple *next_stmt = NULL;
489 if (next_stmt_uid != -1)
490 {
491 next_stmt = uid_to_stmt[next_stmt_uid];
492 next_stmt_bb_order = bb_to_cfg_order[gimple_bb (next_stmt)->index];
493 }
494
495 /* Pull the next block to simulate off the worklist if it comes first. */
496 if (next_block_order != -1
497 && (next_stmt_bb_order == -1
498 || next_block_order <= next_stmt_bb_order))
499 {
500 curr_order = next_block_order;
501 bitmap_clear_bit (cfg_blocks, next_block_order);
502 basic_block bb
503 = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb [next_block_order]);
504 simulate_block (bb);
505 }
506 /* Else simulate from the SSA edge worklist. */
507 else
508 {
509 curr_order = next_stmt_bb_order;
510 if (dump_file && (dump_flags & TDF_DETAILS))
511 {
512 fprintf (dump_file, "\nSimulating statement: ");
513 print_gimple_stmt (dump_file, next_stmt, 0, dump_flags);
514 }
515 simulate_stmt (next_stmt);
516 }
517 }
518
519 ssa_prop_fini ();
520 }
521
522 /* Return true if STMT is of the form 'mem_ref = RHS', where 'mem_ref'
523 is a non-volatile pointer dereference, a structure reference or a
524 reference to a single _DECL. Ignore volatile memory references
525 because they are not interesting for the optimizers. */
526
527 bool
528 stmt_makes_single_store (gimple *stmt)
529 {
530 tree lhs;
531
532 if (gimple_code (stmt) != GIMPLE_ASSIGN
533 && gimple_code (stmt) != GIMPLE_CALL)
534 return false;
535
536 if (!gimple_vdef (stmt))
537 return false;
538
539 lhs = gimple_get_lhs (stmt);
540
541 /* A call statement may have a null LHS. */
542 if (!lhs)
543 return false;
544
545 return (!TREE_THIS_VOLATILE (lhs)
546 && (DECL_P (lhs)
547 || REFERENCE_CLASS_P (lhs)));
548 }
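/* For example (GIMPLE in dump form, illustrative only):

     *p_1 = x_2;      store through a pointer, qualifies
     s.f = 1;         store to a structure field, qualifies
     x_3 = y_4;       register copy, no virtual definition, rejected
     *q_5 ={v} 0;     volatile store, rejected  */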
549
550
551 /* Propagation statistics. */
552 struct prop_stats_d
553 {
554 long num_const_prop;
555 long num_copy_prop;
556 long num_stmts_folded;
557 long num_dce;
558 };
559
560 static struct prop_stats_d prop_stats;
561
562 /* Replace USE references in statement STMT with the values known to the
563    engine via value_of_expr.  Return true if at least one reference was replaced.  */
564
565 bool
566 substitute_and_fold_engine::replace_uses_in (gimple *stmt)
567 {
568 bool replaced = false;
569 use_operand_p use;
570 ssa_op_iter iter;
571
572 FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE)
573 {
574 tree tuse = USE_FROM_PTR (use);
575 tree val = value_of_expr (tuse, stmt);
576
577 if (val == tuse || val == NULL_TREE)
578 continue;
579
580 if (gimple_code (stmt) == GIMPLE_ASM
581 && !may_propagate_copy_into_asm (tuse))
582 continue;
583
584 if (!may_propagate_copy (tuse, val))
585 continue;
586
587 if (TREE_CODE (val) != SSA_NAME)
588 prop_stats.num_const_prop++;
589 else
590 prop_stats.num_copy_prop++;
591
592 propagate_value (use, val);
593
594 replaced = true;
595 }
596
597 return replaced;
598 }
599
600
601 /* Replace the arguments of PHI with the values the engine knows for them
602    on the corresponding incoming edges.  Return true if any argument was replaced.  */
603
604 bool
605 substitute_and_fold_engine::replace_phi_args_in (gphi *phi)
606 {
607 size_t i;
608 bool replaced = false;
609
610 for (i = 0; i < gimple_phi_num_args (phi); i++)
611 {
612 tree arg = gimple_phi_arg_def (phi, i);
613
614 if (TREE_CODE (arg) == SSA_NAME)
615 {
616 edge e = gimple_phi_arg_edge (phi, i);
617 tree val = value_on_edge (e, arg);
618
619 if (val && val != arg && may_propagate_copy (arg, val))
620 {
621 if (TREE_CODE (val) != SSA_NAME)
622 prop_stats.num_const_prop++;
623 else
624 prop_stats.num_copy_prop++;
625
626 propagate_value (PHI_ARG_DEF_PTR (phi, i), val);
627 replaced = true;
628
629 /* If we propagated a copy and this argument flows
630 through an abnormal edge, update the replacement
631 accordingly. */
632 if (TREE_CODE (val) == SSA_NAME
633 && e->flags & EDGE_ABNORMAL
634 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
635 {
636 /* This can only occur for virtual operands, since
637                      for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val)
638 would prevent replacement. */
639 gcc_checking_assert (virtual_operand_p (val));
640 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
641 }
642 }
643 }
644 }
645
646 if (dump_file && (dump_flags & TDF_DETAILS))
647 {
648 if (!replaced)
649 fprintf (dump_file, "No folding possible\n");
650 else
651 {
652 fprintf (dump_file, "Folded into: ");
653 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
654 fprintf (dump_file, "\n");
655 }
656 }
657
658 return replaced;
659 }
660
661
662 class substitute_and_fold_dom_walker : public dom_walker
663 {
664 public:
665 substitute_and_fold_dom_walker (cdi_direction direction,
666 class substitute_and_fold_engine *engine)
667 : dom_walker (direction),
668 something_changed (false),
669 substitute_and_fold_engine (engine)
670 {
671 stmts_to_remove.create (0);
672 stmts_to_fixup.create (0);
673 need_eh_cleanup = BITMAP_ALLOC (NULL);
674 }
675 ~substitute_and_fold_dom_walker ()
676 {
677 stmts_to_remove.release ();
678 stmts_to_fixup.release ();
679 BITMAP_FREE (need_eh_cleanup);
680 }
681
682 virtual edge before_dom_children (basic_block);
683 virtual void after_dom_children (basic_block bb)
684 {
685 substitute_and_fold_engine->post_fold_bb (bb);
686 }
687
688 bool something_changed;
689 vec<gimple *> stmts_to_remove;
690 vec<gimple *> stmts_to_fixup;
691 bitmap need_eh_cleanup;
692
693 class substitute_and_fold_engine *substitute_and_fold_engine;
694
695 private:
696 void foreach_new_stmt_in_bb (gimple_stmt_iterator old_gsi,
697 gimple_stmt_iterator new_gsi);
698 };
699
700 /* Call post_new_stmt for each new statement that has been added
701    to the current BB.  OLD_GSI is the statement iterator before the BB
702    changes occurred.  NEW_GSI is the iterator which may contain new
703 statements. */
704
705 void
706 substitute_and_fold_dom_walker::foreach_new_stmt_in_bb
707 (gimple_stmt_iterator old_gsi,
708 gimple_stmt_iterator new_gsi)
709 {
710 basic_block bb = gsi_bb (new_gsi);
711 if (gsi_end_p (old_gsi))
712 old_gsi = gsi_start_bb (bb);
713 else
714 gsi_next (&old_gsi);
715 while (gsi_stmt (old_gsi) != gsi_stmt (new_gsi))
716 {
717 gimple *stmt = gsi_stmt (old_gsi);
718 substitute_and_fold_engine->post_new_stmt (stmt);
719 gsi_next (&old_gsi);
720 }
721 }
722
723 bool
724 substitute_and_fold_engine::propagate_into_phi_args (basic_block bb)
725 {
726 edge e;
727 edge_iterator ei;
728 bool propagated = false;
729
730 /* Visit BB successor PHI nodes and replace PHI args. */
731 FOR_EACH_EDGE (e, ei, bb->succs)
732 {
733 for (gphi_iterator gpi = gsi_start_phis (e->dest);
734 !gsi_end_p (gpi); gsi_next (&gpi))
735 {
736 gphi *phi = gpi.phi ();
737 use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
738 tree arg = USE_FROM_PTR (use_p);
739 if (TREE_CODE (arg) != SSA_NAME
740 || virtual_operand_p (arg))
741 continue;
742 tree val = value_on_edge (e, arg);
743 if (val
744 && is_gimple_min_invariant (val)
745 && may_propagate_copy (arg, val))
746 {
747 propagate_value (use_p, val);
748 propagated = true;
749 }
750 }
751 }
752 return propagated;
753 }
754
755 edge
756 substitute_and_fold_dom_walker::before_dom_children (basic_block bb)
757 {
758 substitute_and_fold_engine->pre_fold_bb (bb);
759
760 /* Propagate known values into PHI nodes. */
761 for (gphi_iterator i = gsi_start_phis (bb);
762 !gsi_end_p (i);
763 gsi_next (&i))
764 {
765 gphi *phi = i.phi ();
766 tree res = gimple_phi_result (phi);
767 if (virtual_operand_p (res))
768 continue;
769 if (dump_file && (dump_flags & TDF_DETAILS))
770 {
771 fprintf (dump_file, "Folding PHI node: ");
772 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
773 }
774 if (res && TREE_CODE (res) == SSA_NAME)
775 {
776 tree sprime = substitute_and_fold_engine->value_of_expr (res, phi);
777 if (sprime
778 && sprime != res
779 && may_propagate_copy (res, sprime))
780 {
781 if (dump_file && (dump_flags & TDF_DETAILS))
782 {
783 fprintf (dump_file, "Queued PHI for removal. Folds to: ");
784 print_generic_expr (dump_file, sprime);
785 fprintf (dump_file, "\n");
786 }
787 stmts_to_remove.safe_push (phi);
788 continue;
789 }
790 }
791 something_changed |= substitute_and_fold_engine->replace_phi_args_in (phi);
792 }
793
794   /* Propagate known values into stmts.  In some cases this exposes
795 more trivially deletable stmts to walk backward. */
796 for (gimple_stmt_iterator i = gsi_start_bb (bb);
797 !gsi_end_p (i);
798 gsi_next (&i))
799 {
800 bool did_replace;
801 gimple *stmt = gsi_stmt (i);
802
803 substitute_and_fold_engine->pre_fold_stmt (stmt);
804
805 if (dump_file && (dump_flags & TDF_DETAILS))
806 {
807 fprintf (dump_file, "Folding statement: ");
808 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
809 }
810
811       /* No point propagating into a stmt we already have a value for, since
812          we can propagate into all its uses.  Mark it for removal instead.  */
813 tree lhs = gimple_get_lhs (stmt);
814 if (lhs && TREE_CODE (lhs) == SSA_NAME)
815 {
816 tree sprime = substitute_and_fold_engine->value_of_expr (lhs, stmt);
817 if (sprime
818 && sprime != lhs
819 && may_propagate_copy (lhs, sprime)
820 && !stmt_could_throw_p (cfun, stmt)
821 && !gimple_has_side_effects (stmt)
822 /* We have to leave ASSERT_EXPRs around for jump-threading. */
823 && (!is_gimple_assign (stmt)
824 || gimple_assign_rhs_code (stmt) != ASSERT_EXPR))
825 {
826 if (dump_file && (dump_flags & TDF_DETAILS))
827 {
828 fprintf (dump_file, "Queued stmt for removal. Folds to: ");
829 print_generic_expr (dump_file, sprime);
830 fprintf (dump_file, "\n");
831 }
832 stmts_to_remove.safe_push (stmt);
833 continue;
834 }
835 }
836
837 /* Replace the statement with its folded version and mark it
838 folded. */
839 did_replace = false;
840 gimple *old_stmt = stmt;
841 bool was_noreturn = (is_gimple_call (stmt)
842 && gimple_call_noreturn_p (stmt));
843
844 /* Replace real uses in the statement. */
845 did_replace |= substitute_and_fold_engine->replace_uses_in (stmt);
846
847 gimple_stmt_iterator prev_gsi = i;
848 gsi_prev (&prev_gsi);
849
850 /* If we made a replacement, fold the statement. */
851 if (did_replace)
852 {
853 fold_stmt (&i, follow_single_use_edges);
854 stmt = gsi_stmt (i);
855 gimple_set_modified (stmt, true);
856 }
857 /* Also fold if we want to fold all statements. */
858 else if (substitute_and_fold_engine->fold_all_stmts
859 && fold_stmt (&i, follow_single_use_edges))
860 {
861 did_replace = true;
862 stmt = gsi_stmt (i);
863 gimple_set_modified (stmt, true);
864 }
865
866 /* Some statements may be simplified using propagator
867 specific information. Do this before propagating
868 into the stmt to not disturb pass specific information. */
869 update_stmt_if_modified (stmt);
870 if (substitute_and_fold_engine->fold_stmt (&i))
871 {
872 did_replace = true;
873 prop_stats.num_stmts_folded++;
874 stmt = gsi_stmt (i);
875 gimple_set_modified (stmt, true);
876 }
877
878       /* If this is a control statement that the propagator left edges
879          unexecuted on, force the condition in a way consistent with
880 that. See PR66945 for cases where the propagator can end
881 up with a different idea of a taken edge than folding
882 (once undefined behavior is involved). */
883 if (gimple_code (stmt) == GIMPLE_COND)
884 {
885 if ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE)
886 ^ (EDGE_SUCC (bb, 1)->flags & EDGE_EXECUTABLE))
887 {
888 if (((EDGE_SUCC (bb, 0)->flags & EDGE_TRUE_VALUE) != 0)
889 == ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE) != 0))
890 gimple_cond_make_true (as_a <gcond *> (stmt));
891 else
892 gimple_cond_make_false (as_a <gcond *> (stmt));
893 gimple_set_modified (stmt, true);
894 did_replace = true;
895 }
896 }
897
898 /* Now cleanup. */
899 if (did_replace)
900 {
901 foreach_new_stmt_in_bb (prev_gsi, i);
902
903 /* If we cleaned up EH information from the statement,
904 remove EH edges. */
905 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
906 bitmap_set_bit (need_eh_cleanup, bb->index);
907
908           /* If we turned a call that was not noreturn into a noreturn one,
909              schedule it for fixup.  */
910 if (!was_noreturn
911 && is_gimple_call (stmt)
912 && gimple_call_noreturn_p (stmt))
913 stmts_to_fixup.safe_push (stmt);
914
915 if (gimple_assign_single_p (stmt))
916 {
917 tree rhs = gimple_assign_rhs1 (stmt);
918
919 if (TREE_CODE (rhs) == ADDR_EXPR)
920 recompute_tree_invariant_for_addr_expr (rhs);
921 }
922
923 /* Determine what needs to be done to update the SSA form. */
924 update_stmt_if_modified (stmt);
925 if (!is_gimple_debug (stmt))
926 something_changed = true;
927 }
928
929 if (dump_file && (dump_flags & TDF_DETAILS))
930 {
931 if (did_replace)
932 {
933 fprintf (dump_file, "Folded into: ");
934 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
935 fprintf (dump_file, "\n");
936 }
937 else
938 fprintf (dump_file, "Not folded\n");
939 }
940 }
941
942 something_changed |= substitute_and_fold_engine->propagate_into_phi_args (bb);
943
944 return NULL;
945 }
946
947
948
949 /* Perform final substitution and folding of propagated values.
950 Process the whole function if BLOCK is null, otherwise only
951 process the blocks that BLOCK dominates. In the latter case,
952 it is the caller's responsibility to ensure that dominator
953 information is available and up-to-date.
954
955    Values to substitute are obtained from the engine's value_of_expr
956    and value_on_edge hooks; uses for which no value is known are
957    left untouched.
958
959    The engine's fold_stmt hook is invoked on statements after values
960    have been propagated, allowing pass specific simplification.
961
962    Statements and PHIs whose result is fully known are queued during
963    the dominator walk and removed afterwards as trivially dead.
964
965    Statements within a basic block are walked from first to last.
966
967 Return TRUE when something changed. */
968
969 bool
970 substitute_and_fold_engine::substitute_and_fold (basic_block block)
971 {
972 if (dump_file && (dump_flags & TDF_DETAILS))
973 fprintf (dump_file, "\nSubstituting values and folding statements\n\n");
974
975 memset (&prop_stats, 0, sizeof (prop_stats));
976
977 /* Don't call calculate_dominance_info when iterating over a subgraph.
978 Callers that are using the interface this way are likely to want to
979 iterate over several disjoint subgraphs, and it would be expensive
980 in enable-checking builds to revalidate the whole dominance tree
981 each time. */
982 if (block)
983 gcc_assert (dom_info_state (CDI_DOMINATORS));
984 else
985 calculate_dominance_info (CDI_DOMINATORS);
986 substitute_and_fold_dom_walker walker (CDI_DOMINATORS, this);
987 walker.walk (block ? block : ENTRY_BLOCK_PTR_FOR_FN (cfun));
988
989   /* We cannot remove stmts during the BB walk, and especially must not
990      release SSA names there, as that destroys the lattice of our callers.
991 Remove stmts in reverse order to make debug stmt creation possible. */
992 while (!walker.stmts_to_remove.is_empty ())
993 {
994 gimple *stmt = walker.stmts_to_remove.pop ();
995 if (dump_file && dump_flags & TDF_DETAILS)
996 {
997 fprintf (dump_file, "Removing dead stmt ");
998 print_gimple_stmt (dump_file, stmt, 0);
999 fprintf (dump_file, "\n");
1000 }
1001 prop_stats.num_dce++;
1002 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1003 if (gimple_code (stmt) == GIMPLE_PHI)
1004 remove_phi_node (&gsi, true);
1005 else
1006 {
1007 unlink_stmt_vdef (stmt);
1008 gsi_remove (&gsi, true);
1009 release_defs (stmt);
1010 }
1011 }
1012
1013 if (!bitmap_empty_p (walker.need_eh_cleanup))
1014 gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);
1015
1016 /* Fixup stmts that became noreturn calls. This may require splitting
1017 blocks and thus isn't possible during the dominator walk. Do this
1018      in reverse order so we don't inadvertently remove a stmt we want to
1019      fix up by visiting a dominating, now noreturn, call first.  */
1020 while (!walker.stmts_to_fixup.is_empty ())
1021 {
1022 gimple *stmt = walker.stmts_to_fixup.pop ();
1023 if (dump_file && dump_flags & TDF_DETAILS)
1024 {
1025 fprintf (dump_file, "Fixing up noreturn call ");
1026 print_gimple_stmt (dump_file, stmt, 0);
1027 fprintf (dump_file, "\n");
1028 }
1029 fixup_noreturn_call (stmt);
1030 }
1031
1032 statistics_counter_event (cfun, "Constants propagated",
1033 prop_stats.num_const_prop);
1034 statistics_counter_event (cfun, "Copies propagated",
1035 prop_stats.num_copy_prop);
1036 statistics_counter_event (cfun, "Statements folded",
1037 prop_stats.num_stmts_folded);
1038 statistics_counter_event (cfun, "Statements deleted",
1039 prop_stats.num_dce);
1040
1041 return walker.something_changed;
1042 }
1043
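/* A purely illustrative sketch of how a pass commits propagation
   results: derive from substitute_and_fold_engine, implement
   value_of_expr (and, if needed, value_on_edge or fold_stmt), then
   call substitute_and_fold, for example:

     class my_subst : public substitute_and_fold_engine
     {
     public:
       tree value_of_expr (tree name, gimple *) final override
       {
         return get_recorded_value (name);
       }
     };

     bool changed = my_subst ().substitute_and_fold (NULL);

   where get_recorded_value stands for a hypothetical pass specific
   lookup returning the value associated with NAME, or NULL_TREE.  */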
1044
1045 /* Return true if we may propagate ORIG into DEST, false otherwise. */
1046
1047 bool
1048 may_propagate_copy (tree dest, tree orig)
1049 {
1050 tree type_d = TREE_TYPE (dest);
1051 tree type_o = TREE_TYPE (orig);
1052
1053 /* If ORIG is a default definition which flows in from an abnormal edge
1054 then the copy can be propagated. It is important that we do so to avoid
1055 uninitialized copies. */
1056 if (TREE_CODE (orig) == SSA_NAME
1057 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig)
1058 && SSA_NAME_IS_DEFAULT_DEF (orig)
1059 && (SSA_NAME_VAR (orig) == NULL_TREE
1060 || TREE_CODE (SSA_NAME_VAR (orig)) == VAR_DECL))
1061 ;
1062 /* Otherwise if ORIG just flows in from an abnormal edge then the copy cannot
1063 be propagated. */
1064 else if (TREE_CODE (orig) == SSA_NAME
1065 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
1066 return false;
1067 /* Similarly if DEST flows in from an abnormal edge then the copy cannot be
1068 propagated. */
1069 else if (TREE_CODE (dest) == SSA_NAME
1070 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest))
1071 return false;
1072
1073 /* Do not copy between types for which we *do* need a conversion. */
1074 if (!useless_type_conversion_p (type_d, type_o))
1075 return false;
1076
1077 /* Generally propagating virtual operands is not ok as that may
1078 create overlapping life-ranges. */
1079 if (TREE_CODE (dest) == SSA_NAME && virtual_operand_p (dest))
1080 return false;
1081
1082 /* Anything else is OK. */
1083 return true;
1084 }
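/* For instance (illustrative only), given

     x_1 = y_2;
     z_3 = x_1 + 1;

   may_propagate_copy (x_1, y_2) permits rewriting the second statement
   to use y_2, provided the types are compatible and neither name is
   tied to an abnormal PHI; a virtual operand such as .MEM_4 is always
   rejected.  */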
1085
1086 /* Like may_propagate_copy, but use as the destination expression
1087 the principal expression (typically, the RHS) contained in
1088 statement DEST. This is more efficient when working with the
1089 gimple tuples representation. */
1090
1091 bool
1092 may_propagate_copy_into_stmt (gimple *dest, tree orig)
1093 {
1094 tree type_d;
1095 tree type_o;
1096
1097 /* If the statement is a switch or a single-rhs assignment,
1098 then the expression to be replaced by the propagation may
1099 be an SSA_NAME. Fortunately, there is an explicit tree
1100 for the expression, so we delegate to may_propagate_copy. */
1101
1102 if (gimple_assign_single_p (dest))
1103 return may_propagate_copy (gimple_assign_rhs1 (dest), orig);
1104 else if (gswitch *dest_swtch = dyn_cast <gswitch *> (dest))
1105 return may_propagate_copy (gimple_switch_index (dest_swtch), orig);
1106
1107 /* In other cases, the expression is not materialized, so there
1108 is no destination to pass to may_propagate_copy. On the other
1109 hand, the expression cannot be an SSA_NAME, so the analysis
1110 is much simpler. */
1111
1112 if (TREE_CODE (orig) == SSA_NAME
1113 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
1114 return false;
1115
1116 if (is_gimple_assign (dest))
1117 type_d = TREE_TYPE (gimple_assign_lhs (dest));
1118 else if (gimple_code (dest) == GIMPLE_COND)
1119 type_d = boolean_type_node;
1120 else if (is_gimple_call (dest)
1121 && gimple_call_lhs (dest) != NULL_TREE)
1122 type_d = TREE_TYPE (gimple_call_lhs (dest));
1123 else
1124 gcc_unreachable ();
1125
1126 type_o = TREE_TYPE (orig);
1127
1128 if (!useless_type_conversion_p (type_d, type_o))
1129 return false;
1130
1131 return true;
1132 }
1133
1134 /* Similarly, but we know that we're propagating into an ASM_EXPR. */
1135
1136 bool
1137 may_propagate_copy_into_asm (tree dest ATTRIBUTE_UNUSED)
1138 {
1139 return true;
1140 }
1141
1142
1143 /* Common code for propagate_value and replace_exp.
1144
1145 Replace use operand OP_P with VAL. FOR_PROPAGATION indicates if the
1146 replacement is done to propagate a value or not. */
1147
1148 static void
1149 replace_exp_1 (use_operand_p op_p, tree val,
1150 bool for_propagation ATTRIBUTE_UNUSED)
1151 {
1152 if (flag_checking)
1153 {
1154 tree op = USE_FROM_PTR (op_p);
1155 gcc_assert (!(for_propagation
1156 && TREE_CODE (op) == SSA_NAME
1157 && TREE_CODE (val) == SSA_NAME
1158 && !may_propagate_copy (op, val)));
1159 }
1160
1161 if (TREE_CODE (val) == SSA_NAME)
1162 SET_USE (op_p, val);
1163 else
1164 SET_USE (op_p, unshare_expr (val));
1165 }
1166
1167
1168 /* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
1169 into the operand pointed to by OP_P.
1170
1171 Use this version for const/copy propagation as it will perform additional
1172 checks to ensure validity of the const/copy propagation. */
1173
1174 void
1175 propagate_value (use_operand_p op_p, tree val)
1176 {
1177 replace_exp_1 (op_p, val, true);
1178 }
1179
1180 /* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME).
1181
1182 Use this version when not const/copy propagating values. For example,
1183 PRE uses this version when building expressions as they would appear
1184 in specific blocks taking into account actions of PHI nodes.
1185
1186 The statement in which an expression has been replaced should be
1187 folded using fold_stmt_inplace. */
1188
1189 void
1190 replace_exp (use_operand_p op_p, tree val)
1191 {
1192 replace_exp_1 (op_p, val, false);
1193 }
1194
1195
1196 /* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
1197 into the tree pointed to by OP_P.
1198
1199    Use this version for const/copy propagation when SSA operands are not
1200    available.  Unlike propagate_value, it does not verify the validity of
1201    the const/copy propagation, nor does it update any operand information.
1202    Be sure to mark the stmt as modified.  */
1203
1204 void
1205 propagate_tree_value (tree *op_p, tree val)
1206 {
1207 if (TREE_CODE (val) == SSA_NAME)
1208 *op_p = val;
1209 else
1210 *op_p = unshare_expr (val);
1211 }
1212
1213
1214 /* Like propagate_tree_value, but use as the operand to replace
1215 the principal expression (typically, the RHS) contained in the
1216 statement referenced by iterator GSI. Note that it is not
1217 always possible to update the statement in-place, so a new
1218 statement may be created to replace the original. */
1219
1220 void
1221 propagate_tree_value_into_stmt (gimple_stmt_iterator *gsi, tree val)
1222 {
1223 gimple *stmt = gsi_stmt (*gsi);
1224
1225 if (is_gimple_assign (stmt))
1226 {
1227 tree expr = NULL_TREE;
1228 if (gimple_assign_single_p (stmt))
1229 expr = gimple_assign_rhs1 (stmt);
1230 propagate_tree_value (&expr, val);
1231 gimple_assign_set_rhs_from_tree (gsi, expr);
1232 }
1233 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
1234 {
1235 tree lhs = NULL_TREE;
1236 tree rhs = build_zero_cst (TREE_TYPE (val));
1237 propagate_tree_value (&lhs, val);
1238 gimple_cond_set_code (cond_stmt, NE_EXPR);
1239 gimple_cond_set_lhs (cond_stmt, lhs);
1240 gimple_cond_set_rhs (cond_stmt, rhs);
1241 }
1242 else if (is_gimple_call (stmt)
1243 && gimple_call_lhs (stmt) != NULL_TREE)
1244 {
1245 tree expr = NULL_TREE;
1246 propagate_tree_value (&expr, val);
1247 replace_call_with_value (gsi, expr);
1248 }
1249 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1250 propagate_tree_value (gimple_switch_index_ptr (swtch_stmt), val);
1251 else
1252 gcc_unreachable ();
1253 }
1254
1255 /* Check exits of each loop in FUN, walk over loop closed PHIs in
1256 each exit basic block and propagate degenerate PHIs. */
1257
1258 unsigned
1259 clean_up_loop_closed_phi (function *fun)
1260 {
1261 gphi *phi;
1262 tree rhs;
1263 tree lhs;
1264 gphi_iterator gsi;
1265
1266 /* Avoid possibly quadratic work when scanning for loop exits across
1267 all loops of a nest. */
1268 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1269 return 0;
1270
1271 /* replace_uses_by might purge dead EH edges and we want it to also
1272 remove dominated blocks. */
1273 calculate_dominance_info (CDI_DOMINATORS);
1274
1275   /* Walk over each loop in the function.  */
1276 for (auto loop : loops_list (fun, 0))
1277 {
1278       /* Check each exit edge of the loop.  */
1279 auto_vec<edge> exits = get_loop_exit_edges (loop);
1280 for (edge e : exits)
1281 if (single_pred_p (e->dest))
1282 /* Walk over loop-closed PHIs. */
1283 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi);)
1284 {
1285 phi = gsi.phi ();
1286 rhs = gimple_phi_arg_def (phi, 0);
1287 lhs = gimple_phi_result (phi);
1288
1289 if (rhs && may_propagate_copy (lhs, rhs))
1290 {
1291 /* Dump details. */
1292 if (dump_file && (dump_flags & TDF_DETAILS))
1293 {
1294 fprintf (dump_file, " Replacing '");
1295 print_generic_expr (dump_file, lhs, dump_flags);
1296 fprintf (dump_file, "' with '");
1297 print_generic_expr (dump_file, rhs, dump_flags);
1298 fprintf (dump_file, "'\n");
1299 }
1300
1301 replace_uses_by (lhs, rhs);
1302 remove_phi_node (&gsi, true);
1303 }
1304 else
1305 gsi_next (&gsi);
1306 }
1307 }
1308
1309 return 0;
1310 }
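/* For instance (illustrative only), a loop-closed PHI in a
   single-predecessor exit block such as

     # x_4 = PHI <x_3(5)>

   is degenerate: the function above replaces every use of x_4 by x_3
   and removes the PHI node.  */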