/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2017 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
#include "dbgcnt.h"

/* This file implements optimizations on the dominator tree.  */
/* Structure for recording edge equivalences.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures.  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
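/* As an illustrative sketch (the SSA names here are hypothetical):
   for a block ending in

     if (x_1 == 7)

   the true edge can carry the simple equivalence LHS = x_1, RHS = 7,
   while both outgoing edges carry condition equivalences such as
   "x_1 == 7 is true" on the true edge and "x_1 == 7 is false" on the
   false edge.  */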
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple *> need_noreturn_fixup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static edge optimize_stmt (basic_block, gimple_stmt_iterator,
                           class const_and_copies *,
                           class avail_exprs_stack *);
static void record_equality (tree, tree, class const_and_copies *);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block,
                                                    class const_and_copies *,
                                                    class avail_exprs_stack *);
static void eliminate_redundant_computations (gimple_stmt_iterator *,
                                              class const_and_copies *,
                                              class avail_exprs_stack *);
static void record_equivalences_from_stmt (gimple *, int,
                                           class avail_exprs_stack *);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
static void dump_dominator_optimization_stats (FILE *file,
                                               hash_table<expr_elt_hasher> *);
/* Free the edge_info data attached to E, if it exists.  */

void
free_dom_edge_info (edge e)
{
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  if (edge_info)
    {
      edge_info->cond_equivalences.release ();
      free (edge_info);
    }
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  /* Free the old one, if it exists.  */
  free_dom_edge_info (e);

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          free_dom_edge_info (e);
          e->aux = NULL;
        }
    }
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || ! CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.

             However, don't do this if the constant isn't zero or one.
             Such conditionals will get optimized more thoroughly during
             the domwalk.  */
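          /* For instance (a hypothetical GIMPLE sketch): given a
             boolean-ranged b_1 and the test

               if (b_1 != 0)

             we can record b_1 = 1 on the true edge and b_1 = 0 on the
             false edge, which is more useful than an equivalence for
             the condition itself.  */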
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && ssa_name_has_boolean_range (op0)
              && is_gimple_min_invariant (op1)
              && (integer_zerop (op1) || integer_onep (op1)))
            {
              tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
              tree false_val = constant_boolean_node (false, TREE_TYPE (op0));

              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1) ? false_val : true_val);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1) ? true_val : false_val);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1) ? true_val : false_val);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1) ? false_val : true_val);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (&edge_info->cond_equivalences, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (&edge_info->cond_equivalences, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (&edge_info->cond_equivalences, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (&edge_info->cond_equivalences, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction,
                      class const_and_copies *const_and_copies,
                      class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, true),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  /* Unwindable equivalences, both const/copy and expression varieties.  */
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  gcond *m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt),
      may_peel_loop_headers_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      may_peel_loop_headers_p = param;
    }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

private:
  /* This flag is used to prevent loops from being peeled repeatedly in jump
     threading; it will be removed once we preserve loop structures throughout
     the compilation -- we will be able to mark the affected loops directly in
     jump threading, and avoid peeling them next time.  */
  bool may_peel_loop_headers_p;
}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  class avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);
  class const_and_copies *const_and_copies = new class const_and_copies ();
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* We want to create the edge info structures before the dominator walk
     so that they'll be in place for the jump threader, particularly when
     threading through a join block.

     The conditions will be lazily updated with global equivalences as
     we reach them during the dominator walk.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    record_edge_info (bb);

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker walker (CDI_DOMINATORS,
                             const_and_copies,
                             avail_exprs_stack);
  walker.walk (fun->cfg->x_entry_block_ptr);

  /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
     edge.  When found, remove jump threads which contain any outgoing
     edge from the affected block.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      edge_iterator ei;
      edge e;

      /* First see if there are any edges without EDGE_EXECUTABLE
         set.  */
      bool found = false;
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if ((e->flags & EDGE_EXECUTABLE) == 0)
            {
              found = true;
              break;
            }
        }

      /* If there were any such edges found, then remove jump threads
         containing any edge leaving BB.  */
      if (found)
        FOR_EACH_EDGE (e, ei, bb->succs)
          remove_jump_threads_including (e);
    }

  {
    gimple_stmt_iterator gsi;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iteration.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple *stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
        {
          fprintf (dump_file, "Fixing up noreturn call ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
          fprintf (dump_file, "\n");
        }
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file, avail_exprs);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  delete avail_exprs_stack;
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt,
                                  gimple *within_stmt ATTRIBUTE_UNUSED,
                                  class avail_exprs_stack *avail_exprs_stack,
                                  basic_block bb ATTRIBUTE_UNUSED)
{
  return avail_exprs_stack->lookup_avail_expr (stmt, false, true);
}
/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
dom_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
        return tem;
    }
  return t;
}
/* We have just found an equivalence for LHS on an edge E.
   Look backwards to other uses of LHS and see if we can derive
   additional equivalences that are valid on edge E.  */
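/* A hypothetical sketch: if we just learned a_1 = 0 on edge E and a
   statement b_2 = a_1 + 3 dominates E->dest, then b_2 = 3 also holds
   on E and is worth recording.  */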
static void
back_propagate_equivalences (tree lhs, edge e,
                             class const_and_copies *const_and_copies)
{
  use_operand_p use_p;
  imm_use_iterator iter;
  bitmap domby = NULL;
  basic_block dest = e->dest;

  /* Iterate over the uses of LHS to see if any dominate E->dest.
     If so, they may create useful equivalences too.

     ???  If the code gets re-organized to a worklist to catch more
     indirect opportunities and it is made to handle PHIs then this
     should only consider use_stmts in basic-blocks we have already visited.  */
  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      gimple *use_stmt = USE_STMT (use_p);

      /* Often the use is in DEST, which we trivially know we can't use.
         This is cheaper than the dominator set tests below.  */
      if (dest == gimple_bb (use_stmt))
        continue;

      /* Filter out statements that can never produce a useful
         equivalence.  */
      tree lhs2 = gimple_get_lhs (use_stmt);
      if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
        continue;

      /* Profiling has shown the domination tests here can be fairly
         expensive.  We get significant improvements by building the
         set of blocks that dominate BB.  We can then just test
         for set membership below.

         We also initialize the set lazily since often the only uses
         are going to be in the same block as DEST.  */
      if (!domby)
        {
          domby = BITMAP_ALLOC (NULL);
          basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
          while (bb)
            {
              bitmap_set_bit (domby, bb->index);
              bb = get_immediate_dominator (CDI_DOMINATORS, bb);
            }
        }

      /* This tests if USE_STMT does not dominate DEST.  */
      if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
        continue;

      /* At this point USE_STMT dominates DEST and may result in a
         useful equivalence.  Try to simplify its RHS to a constant
         or copy.  */
      tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
                                                 no_follow_ssa_edges);
      if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
        record_equality (lhs2, res, const_and_copies);
    }

  if (domby)
    BITMAP_FREE (domby);
}
/* Record NAME has the value zero and if NAME was set from a BIT_IOR_EXPR
   recurse into both operands recording their values as zero too.  */
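/* Illustrative sketch (hypothetical SSA names): given

     a_1 = b_2 | c_3;
     if (a_1 == 0) ...

   the true edge lets us record a_1 = 0, and since a BIT_IOR_EXPR is
   zero only when both operands are zero, b_2 = 0 and c_3 = 0 as well,
   recursing further if b_2 or c_3 is itself set from a BIT_IOR_EXPR.  */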
static void
derive_equivalencs_from_bit_ior (tree name, const_and_copies *const_and_copies)
{
  if (TREE_CODE (name) == SSA_NAME)
    {
      tree value = fold_convert (TREE_TYPE (name), integer_zero_node);

      /* This records the equivalence for the toplevel object.  */
      record_equality (name, value, const_and_copies);

      /* And we can recurse into each operand to potentially find more
         equivalences.  */
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      if (is_gimple_assign (def_stmt)
          && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
        {
          derive_equivalencs_from_bit_ior (gimple_assign_rhs1 (def_stmt),
                                           const_and_copies);
          derive_equivalencs_from_bit_ior (gimple_assign_rhs2 (def_stmt),
                                           const_and_copies);
        }
    }
}
/* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
   by traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */

void
record_temporary_equivalences (edge e,
                               class const_and_copies *const_and_copies,
                               class avail_exprs_stack *avail_exprs_stack)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        {
          avail_exprs_stack->record_cond (eq);

          /* If the condition is testing that X == 0 is true or X != 0 is false
             and X is set from a BIT_IOR_EXPR, then we can record equivalences
             for the operands of the BIT_IOR_EXPR (and recurse on those).  */
          tree op0 = eq->cond.ops.binary.opnd0;
          tree op1 = eq->cond.ops.binary.opnd1;
          if (TREE_CODE (op0) == SSA_NAME && integer_zerop (op1))
            {
              enum tree_code code = eq->cond.ops.binary.op;
              if ((code == EQ_EXPR && eq->value == boolean_true_node)
                  || (code == NE_EXPR && eq->value == boolean_false_node))
                derive_equivalencs_from_bit_ior (op0, const_and_copies);
            }

          /* TODO: We could handle BIT_AND_EXPR in a similar fashion
             recording that the operands have a nonzero value.  */

          /* TODO: We can handle more cases here, particularly when OP0 is
             known to have a boolean range.  */
        }

      tree lhs = edge_info->lhs;
      if (!lhs || TREE_CODE (lhs) != SSA_NAME)
        return;

      /* Record the simple NAME = VALUE equivalence.  */
      tree rhs = edge_info->rhs;
      record_equality (lhs, rhs, const_and_copies);

      /* We already recorded that LHS = RHS, with canonicalization,
         value chain following, etc.

         We also want to record RHS = LHS, but without any canonicalization
         or value chain following.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        const_and_copies->record_const_or_copy_raw (rhs, lhs,
                                                    SSA_NAME_VALUE (rhs));

      /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
         set via a widening type conversion, then we may be able to record
         additional equivalences.  */
      if (TREE_CODE (rhs) == INTEGER_CST)
        {
          gimple *defstmt = SSA_NAME_DEF_STMT (lhs);

          if (defstmt
              && is_gimple_assign (defstmt)
              && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
            {
              tree old_rhs = gimple_assign_rhs1 (defstmt);

              /* If the conversion widens the original value and
                 the constant is in the range of the type of OLD_RHS,
                 then convert the constant and record the equivalence.

                 Note that int_fits_type_p does not check the precision
                 if the upper and lower bounds are OK.  */
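              /* For example (hypothetical types and names): with
                 short s_2 and long l_3 = (long) s_2, learning l_3 = 42
                 on this edge also tells us s_2 = 42, since 42 fits in
                 short.  */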
              if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                  && (TYPE_PRECISION (TREE_TYPE (lhs))
                      > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                  && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                {
                  tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                  record_equality (old_rhs, newval, const_and_copies);
                }
            }
        }

      /* Any equivalence found for LHS may result in additional
         equivalences for other uses of LHS that we have already
         processed.  */
      back_propagate_equivalences (lhs, e, const_and_copies);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e,
                        m_const_and_copies, m_avail_exprs_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  m_avail_exprs_stack->pop_to_marker ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
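/* A small sketch (hypothetical names): the PHI

     x_3 = PHI <y_1 (bb2), y_1 (bb4), x_3 (bb5)>

   has every non-self alternative equal to y_1, so x_3 is equivalent
   to y_1 and we can record that value for x_3.  */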
static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If the associated edge is not marked as executable, then it
             can be ignored.  */
          if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
            continue;

          t = dom_valueize (t);

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */

static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* We can safely ignore edges that are not executable.  */
      if ((e->flags & EDGE_EXECUTABLE) == 0)
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB into
   CONST_AND_COPIES and AVAIL_EXPRS_STACK.  If BB has more than one
   incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb,
                                        class const_and_copies *const_and_copies,
                                        class avail_exprs_stack *avail_exprs_stack)
{
  edge e;
  basic_block parent;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Dump SSA statistics on FILE.  */

static void
dump_dominator_optimization_stats (FILE *file,
                                   hash_table<expr_elt_hasher> *avail_exprs)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y, class const_and_copies *const_and_copies)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
         X.  This will preserve its single use properly and if this
         conditional is eliminated, the computation of X can be
         eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
        std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || real_equal (&dconst0, &TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple *stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple *phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb,
                           class const_and_copies *const_and_copies)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate them into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            const_and_copies->record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      const_and_copies->pop_to_marker ();
    }
}
edge
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb, m_const_and_copies,
                                          m_avail_exprs_stack);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  m_avail_exprs_stack->push_marker ();
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi, m_const_and_copies,
                                      m_avail_exprs_stack);
  m_avail_exprs_stack->pop_to_marker ();

  edge taken_edge = NULL;
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    taken_edge
      = optimize_stmt (bb, gsi, m_const_and_copies, m_avail_exprs_stack);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb, m_const_and_copies);
  if (taken_edge && !dbg_cnt (dom_unreachable_edges))
    return NULL;

  return taken_edge;
}
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  gimple *last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        thread_across_edge (false_edge);
    }

  /* These remove expressions local to BB from the tables.  */
  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}
/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into AVAIL_EXPRS_STACK and
   CONST_AND_COPIES.  */
static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi,
                                  class const_and_copies *const_and_copies,
                                  class avail_exprs_stack *avail_exprs_stack)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but cannot
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
        const_and_copies->record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
                           || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}
/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences into AVAIL_EXPRS_STACK.

   We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */
static void
record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
                               class avail_exprs_stack *avail_exprs_stack)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
        {
          rhs = dom_valueize (rhs);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "==== ASGN ");
              print_generic_expr (dump_file, lhs, 0);
              fprintf (dump_file, " = ");
              print_generic_expr (dump_file, rhs, 0);
              fprintf (dump_file, "\n");
            }

          set_ssa_name_value (lhs, rhs);
        }
    }

  /* Make sure we can propagate &x + CST.  */
  if (lhs_code == SSA_NAME
      && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
    {
      tree op0 = gimple_assign_rhs1 (stmt);
      tree op1 = gimple_assign_rhs2 (stmt);
      tree new_rhs
        = build_fold_addr_expr (fold_build2 (MEM_REF,
                                             TREE_TYPE (TREE_TYPE (op0)),
                                             unshare_expr (op0),
                                             fold_convert (ptr_type_node,
                                                           op1)));
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "==== ASGN ");
          print_generic_expr (dump_file, lhs, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, new_rhs, 0);
          fprintf (dump_file, "\n");
        }

      set_ssa_name_value (lhs, new_rhs);
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
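  /* For instance (a hypothetical sketch): after the store

       *p_1 = x_2;

     recording the "load" x_2 = *p_1 in the expression table lets a later

       y_3 = *p_1;

     be replaced with y_3 = x_2, exposing the redundant load.  */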
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gassign *new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples.  The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition.  I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG.  */
          gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table.  */
      avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
    }
}
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

static void
cprop_operand (gimple *stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate copies into BIVs.
         See PR23821 and PR62217 for how this can disturb IV and
         number of iteration analysis.  */
      if (TREE_CODE (val) != INTEGER_CST)
        {
          gimple *def = SSA_NAME_DEF_STMT (op);
          if (gimple_code (def) == GIMPLE_PHI
              && gimple_bb (def)->loop_father->header == gimple_bb (def))
            return;
        }

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple *stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;
  tree last_copy_propagated_op = NULL;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    {
      tree old_op = USE_FROM_PTR (op_p);

      /* If we have A = B and B = A in the copy propagation tables
         (due to an equality comparison), avoid substituting B for A
         then A for B in the trivially discovered cases.  This allows
         optimization of statements where A and B appear as input
         operands.  */
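      /* Sketch of the hazard (hypothetical names): after "if (a_1 == b_2)"
         we may have both a_1 -> b_2 and b_2 -> a_1 recorded; without this
         guard, "c_3 = a_1 + b_2" could be rewritten to "c_3 = b_2 + a_1"
         to no benefit.  */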
      if (old_op != last_copy_propagated_op)
        {
          cprop_operand (stmt, op_p);

          tree new_op = USE_FROM_PTR (op_p);
          if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
            last_copy_propagated_op = new_op;
        }
    }
}
/* Optimize the statement in block BB pointed to by iterator SI
   using equivalences from CONST_AND_COPIES and AVAIL_EXPRS_STACK.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */
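/* A combined example (hypothetical GIMPLE): in

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the second computation is redundant and d_4 can simply use a_1;
   and after "e_5 = 7;" every dominated use of e_5 can be replaced
   by the constant 7.  */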
static edge
optimize_stmt (basic_block bb, gimple_stmt_iterator si,
               class const_and_copies *const_and_copies,
               class avail_exprs_stack *avail_exprs_stack)
{
  gimple *stmt, *old_stmt;
  bool may_optimize_p;
  bool modified_p = false;
  bool was_noreturn;
  edge retval = NULL;

  old_stmt = stmt = gsi_stmt (si);
  was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (swtch_stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      if (gimple_code (stmt) == GIMPLE_COND)
        {
          tree lhs = gimple_cond_lhs (stmt);
          tree rhs = gimple_cond_rhs (stmt);

          /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
             then this conditional is computable at compile time.  We can just
             shove either 0 or 1 into the LHS, mark the statement as modified
             and all the right things will just happen below.

             Note this would apply to any case where LHS has a range
             narrower than its type implies and RHS is outside that
             narrower range.  Future work.  */
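          /* E.g. (hypothetical): for a boolean-ranged b_1, the test
             "if (b_1 == 2)" can never be true, so replacing the LHS
             with 0 turns it into "if (0 == 2)", which folds below.  */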
          if (TREE_CODE (lhs) == SSA_NAME
              && ssa_name_has_boolean_range (lhs)
              && TREE_CODE (rhs) == INTEGER_CST
              && ! (integer_zerop (rhs) || integer_onep (rhs)))
            {
              gimple_cond_set_lhs (as_a <gcond *> (stmt),
                                   fold_convert (TREE_TYPE (lhs),
                                                 integer_zero_node));
              gimple_set_modified (stmt, true);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si, const_and_copies,
                                        avail_exprs_stack);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
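      /* E.g. (hypothetical): if "*p_1 = x_2;" is still available when a
         second "*p_1 = x_2;" is seen, the later store writes back the
         value already in memory and can be removed.  */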
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gassign *new_stmt;
          rhs = dom_valueize (rhs);
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = avail_exprs_stack->lookup_avail_expr (new_stmt, false,
                                                             false);
          if (cached_lhs
              && rhs == cached_lhs)
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return retval;
            }
        }

      /* Record any additional equivalences created by this statement.  */
      if (is_gimple_assign (stmt))
        record_equivalences_from_stmt (stmt, may_optimize_p, avail_exprs_stack);
    }

  /* If STMT is a COND_EXPR or SWITCH_EXPR and it was modified, then we may
     know where it goes.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt),
                               gimple_cond_rhs (stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        val = gimple_switch_index (swtch_stmt);

      if (val && TREE_CODE (val) == INTEGER_CST)
        {
          retval = find_taken_edge (bb, val);
          if (retval)
            {
              /* Fix the condition to be either true or false.  */
              if (gimple_code (stmt) == GIMPLE_COND)
                {
                  if (integer_zerop (val))
                    gimple_cond_make_false (as_a <gcond *> (stmt));
                  else if (integer_onep (val))
                    gimple_cond_make_true (as_a <gcond *> (stmt));
                  else
                    gcc_unreachable ();

                  gimple_set_modified (stmt, true);
                }

              /* Further simplifications may be possible.  */
              cfg_altered = true;
            }
        }

      update_stmt_if_modified (stmt);

      /* If we simplified a statement in such a way as to be shown that it
         cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }

      if (!was_noreturn
          && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
        need_noreturn_fixup.safe_push (stmt);
    }
  return retval;
}