/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2016 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
#include "dbgcnt.h"

/* This file implements optimizations on the dominator tree.  */

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

struct cond_equivalence
{
  struct hashable_expr cond;
  tree value;
};

/* Structure for recording edge equivalences.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures.  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple *> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static edge optimize_stmt (basic_block, gimple_stmt_iterator,
			   class const_and_copies *,
			   class avail_exprs_stack *);
static tree lookup_avail_expr (gimple *, bool, class avail_exprs_stack *);
static void record_cond (cond_equivalence *, class avail_exprs_stack *);
static void record_equality (tree, tree, class const_and_copies *);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block,
						    class const_and_copies *,
						    class avail_exprs_stack *);
static void eliminate_redundant_computations (gimple_stmt_iterator *,
					      class const_and_copies *,
					      class avail_exprs_stack *);
static void record_equivalences_from_stmt (gimple *, int,
					   class avail_exprs_stack *);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
static void dump_dominator_optimization_stats (FILE *file,
					       hash_table<expr_elt_hasher> *);

/* Free the edge_info data attached to E, if it exists.  */

void
free_dom_edge_info (edge e)
{
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  if (edge_info)
    {
      edge_info->cond_equivalences.release ();
      free (edge_info);
    }
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  /* Free the old one, if it exists.  */
  free_dom_edge_info (e);

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  free_dom_edge_info (e);
	  e->aux = NULL;
	}
    }
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
			   tree op0, tree op1,
			   vec<cond_equivalence> *p,
			   bool val = true)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = val ? boolean_true_node : boolean_false_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	  build_and_record_new_cond (LTGT_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
				  ? LE_EXPR : GE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (EQ_EXPR, op0, op1,
				 &edge_info->cond_equivalences, false);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      build_and_record_new_cond (LE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
				  ? UNLE_EXPR : UNGE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

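/* An illustrative sketch (the SSA names below are invented, not from any
   particular testcase): for a block ending in

     if (a_1 < b_2) goto <true>; else goto <false>;

   the true edge's cond_equivalences vector ends up holding

     a_1 <= b_2  ->  true
     a_1 != b_2  ->  true
     a_1 == b_2  ->  false
     a_1 <  b_2  ->  true

   so a dominated statement such as "x_3 = a_1 <= b_2" can later be
   replaced by boolean_true_node once these entries reach the expression
   hash table via record_cond.  */
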
/* Return TRUE if OP, an SSA_NAME, has a range of values [0..1], false
   otherwise.

   This can be because it is a boolean type, any type with
   a single bit of precision, or has a known range of values
   it might hold of [0..1] via VRP analysis.  */

static bool
ssa_name_has_boolean_range (tree op)
{
  wide_int min, max;

  /* Boolean types always have a range [0..1].  */
  if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE)
    return true;

  /* An integral type with a single bit of precision.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (op))
      && TYPE_PRECISION (TREE_TYPE (op)) == 1)
    return true;

  /* An integral type with more precision, but the object
     only takes on values [0..1] as determined by VRP
     analysis.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (op))
      && get_range_info (op, &min, &max) == VR_RANGE
      && wi::eq_p (min, 0)
      && wi::eq_p (max, 1))
    return true;

  return false;
}

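/* For example (ours, not from the original sources): a _Bool SSA name, a
   read of a 1-bit unsigned bit-field, or an "int" SSA name that VRP has
   already constrained to [0, 1] all satisfy this predicate.  That is what
   lets record_edge_info record "x_1 = 0" and "x_1 = 1" equivalences on
   the two arms of "if (x_1 == 0)" below.  */
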
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
	{
	  gswitch *switch_stmt = as_a <gswitch *> (stmt);
	  tree index = gimple_switch_index (switch_stmt);

	  if (TREE_CODE (index) == SSA_NAME)
	    {
	      int i;
	      int n_labels = gimple_switch_num_labels (switch_stmt);
	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
	      edge e;
	      edge_iterator ei;

	      for (i = 0; i < n_labels; i++)
		{
		  tree label = gimple_switch_label (switch_stmt, i);
		  basic_block target_bb = label_to_block (CASE_LABEL (label));
		  if (CASE_HIGH (label)
		      || !CASE_LOW (label)
		      || info[target_bb->index])
		    info[target_bb->index] = error_mark_node;
		  else
		    info[target_bb->index] = label;
		}

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  basic_block target_bb = e->dest;
		  tree label = info[target_bb->index];

		  if (label != NULL && label != error_mark_node)
		    {
		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
						 CASE_LOW (label));
		      edge_info = allocate_edge_info (e);
		      edge_info->lhs = index;
		      edge_info->rhs = x;
		    }
		}
	      free (info);
	    }
	}

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  edge true_edge;
	  edge false_edge;

	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);
	  enum tree_code code = gimple_cond_code (stmt);

	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

	  /* Special case comparing booleans against a constant as we
	     know the value of OP0 on both arms of the branch.  i.e., we
	     can record an equivalence for OP0 rather than COND.  */
	  if ((code == EQ_EXPR || code == NE_EXPR)
	      && TREE_CODE (op0) == SSA_NAME
	      && ssa_name_has_boolean_range (op0)
	      && is_gimple_min_invariant (op1))
	    {
	      tree true_val = fold_convert (TREE_TYPE (op0),
					    boolean_true_node);
	      tree false_val = fold_convert (TREE_TYPE (op0),
					     boolean_false_node);
	      if (code == EQ_EXPR)
		{
		  edge_info = allocate_edge_info (true_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? false_val : true_val);

		  edge_info = allocate_edge_info (false_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? true_val : false_val);
		}
	      else
		{
		  edge_info = allocate_edge_info (true_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? true_val : false_val);

		  edge_info = allocate_edge_info (false_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? false_val : true_val);
		}
	    }
	  else if (is_gimple_min_invariant (op0)
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (op0)
		    && real_zerop (op0));
	      struct edge_info *edge_info;

	      edge_info = allocate_edge_info (true_edge);
	      record_conditions (edge_info, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		{
		  edge_info->lhs = op1;
		  edge_info->rhs = op0;
		}

	      edge_info = allocate_edge_info (false_edge);
	      record_conditions (edge_info, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		{
		  edge_info->lhs = op1;
		  edge_info->rhs = op0;
		}
	    }
	  else if (TREE_CODE (op0) == SSA_NAME
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (op1)
		    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
	      struct edge_info *edge_info;

	      edge_info = allocate_edge_info (true_edge);
	      record_conditions (edge_info, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		{
		  edge_info->lhs = op0;
		  edge_info->rhs = op1;
		}

	      edge_info = allocate_edge_info (false_edge);
	      record_conditions (edge_info, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		{
		  edge_info->lhs = op0;
		  edge_info->rhs = op1;
		}
	    }

	  /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
	}
    }
}

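/* A sketch of the net effect (names invented, not from any testcase): for

     if (x_1 == 42) goto bb_t; else goto bb_f;

   the true edge gets the simple equivalence lhs = x_1, rhs = 42 plus the
   cond_equivalences derived by record_conditions, while the false edge
   records "x_1 == 42" as false.  A switch statement similarly records
   "index = CASE_LOW" on each outgoing edge whose target is reached by
   exactly one singleton case label.  */
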
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction,
		      class const_and_copies *const_and_copies,
		      class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, true),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  /* Unwindable equivalences, both const/copy and expression varieties.  */
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  /* Dummy condition used when threading over a backedge.  */
  gcond *m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt),
      may_peel_loop_headers_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      may_peel_loop_headers_p = param;
    }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

private:
  /* This flag is used to prevent loops from being peeled repeatedly in jump
     threading; it will be removed once we preserve loop structures throughout
     the compilation -- we will be able to mark the affected loops directly in
     jump threading, and avoid peeling them next time.  */
  bool may_peel_loop_headers_p;
}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  class avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);
  class const_and_copies *const_and_copies = new class const_and_copies ();
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in future then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* We want to create the edge info structures before the dominator walk
     so that they'll be in place for the jump threader, particularly when
     threading through a join block.

     The conditions will be lazily updated with global equivalences as
     we reach them during the dominator walk.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    record_edge_info (bb);

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker walker (CDI_DOMINATORS,
			     const_and_copies,
			     avail_exprs_stack);
  walker.walk (fun->cfg->x_entry_block_ptr);

  /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
     edge.  When found, remove jump threads which contain any outgoing
     edge from the affected block.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      edge_iterator ei;
      edge e;

      /* First see if there are any edges without EDGE_EXECUTABLE
	 set.  */
      bool found = false;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if ((e->flags & EDGE_EXECUTABLE) == 0)
	    {
	      found = true;
	      break;
	    }
	}

      /* If there were any such edges found, then remove jump threads
	 containing any edge leaving BB.  */
      if (found)
	FOR_EACH_EDGE (e, ei, bb->succs)
	  remove_jump_threads_including (e);
    }

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple *stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file, avail_exprs);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  delete avail_exprs_stack;
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
	 and change the code.  */
      if (code == LT_EXPR
	  || code == GT_EXPR
	  || code == LE_EXPR
	  || code == GE_EXPR)
	{
	  code = swap_tree_comparison (code);

	  gimple_cond_set_code (condstmt, code);
	  gimple_cond_set_lhs (condstmt, op1);
	  gimple_cond_set_rhs (condstmt, op0);

	  update_stmt (condstmt);
	}
    }
}

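/* For instance (our example, names invented): "if (5 > a_1)" is rewritten
   in place to "if (a_1 < 5)", so hashing and the equivalence tables see
   one canonical spelling of the comparison regardless of how it was
   originally written.  */
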
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt,
				  gimple *within_stmt ATTRIBUTE_UNUSED,
				  class avail_exprs_stack *avail_exprs_stack)
{
  return lookup_avail_expr (stmt, false, avail_exprs_stack);
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
dom_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}

/* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
   by traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */

static void
record_temporary_equivalences (edge e,
			       class const_and_copies *const_and_copies,
			       class avail_exprs_stack *avail_exprs_stack)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs)
	record_equality (lhs, rhs, const_and_copies);

      /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
	 set via a widening type conversion, then we may be able to record
	 additional equivalences.  */
      if (lhs
	  && TREE_CODE (lhs) == SSA_NAME
	  && TREE_CODE (rhs) == INTEGER_CST)
	{
	  gimple *defstmt = SSA_NAME_DEF_STMT (lhs);

	  if (defstmt
	      && is_gimple_assign (defstmt)
	      && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
	    {
	      tree old_rhs = gimple_assign_rhs1 (defstmt);

	      /* If the conversion widens the original value and
		 the constant is in the range of the type of OLD_RHS,
		 then convert the constant and record the equivalence.

		 Note that int_fits_type_p does not check the precision
		 if the upper and lower bounds are OK.  */
	      if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
		  && (TYPE_PRECISION (TREE_TYPE (lhs))
		      > TYPE_PRECISION (TREE_TYPE (old_rhs)))
		  && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
		{
		  tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
		  record_equality (old_rhs, newval, const_and_copies);
		}
	    }
	}

      /* If LHS is an SSA_NAME with a new equivalency then try if
	 stmts with uses of that LHS that dominate the edge destination
	 simplify and allow further equivalences to be recorded.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	{
	  use_operand_p use_p;
	  imm_use_iterator iter;
	  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
	    {
	      gimple *use_stmt = USE_STMT (use_p);

	      /* Only bother to record more equivalences for lhs that
		 can be directly used by e->dest.
		 ??? If the code gets re-organized to a worklist to
		 catch more indirect opportunities and it is made to
		 handle PHIs then this should only consider use_stmts
		 in basic-blocks we have already visited.  */
	      if (e->dest == gimple_bb (use_stmt)
		  || !dominated_by_p (CDI_DOMINATORS,
				      e->dest, gimple_bb (use_stmt)))
		continue;
	      tree lhs2 = gimple_get_lhs (use_stmt);
	      if (lhs2 && TREE_CODE (lhs2) == SSA_NAME)
		{
		  tree res
		    = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
						      no_follow_ssa_edges);
		  if (res
		      && (TREE_CODE (res) == SSA_NAME
			  || is_gimple_min_invariant (res)))
		    record_equality (lhs2, res, const_and_copies);
		}
	    }
	}

      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	record_cond (eq, avail_exprs_stack);
    }
}

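/* A sketch of the widening-conversion case above (names invented): given

     x_2 = (long) s_1;          /* s_1 has type short.  */
     if (x_2 == 3) goto bb_t; else goto bb_f;

   traversing the true edge records x_2 = 3 and, because 3 fits in short,
   also s_1 = 3, which may let dominated uses of s_1 fold as well.  */
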
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
	gimple_build_cond (NE_EXPR,
			   integer_zero_node, integer_zero_node,
			   NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e, m_const_and_copies, m_avail_exprs_stack);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
			m_const_and_copies, m_avail_exprs_stack,
			simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  m_avail_exprs_stack->pop_to_marker ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If the associated edge is not marked as executable, then it
	     can be ignored.  */
	  if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
	    continue;

	  t = dom_valueize (t);

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
	  && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}

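/* For example (ours, names invented): for

     x_3 = PHI <y_2(bb_a), y_2(bb_b), x_3(bb_c)>

   every executable alternative other than x_3 itself is y_2, so the walk
   records the equivalence x_3 = y_2 via set_ssa_name_value.  */
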
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */

static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
	 the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	continue;

      /* We can safely ignore edges that are not executable.  */
      if ((e->flags & EDGE_EXECUTABLE) == 0)
	continue;

      /* If we have already seen a non-loop edge, then we must have
	 multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
	return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
	 it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB into
   CONST_AND_COPIES and AVAIL_EXPRS_STACK.  If BB has more than one
   incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb,
    class const_and_copies *const_and_copies,
    class avail_exprs_stack *avail_exprs_stack)
{
  edge e;
  basic_block parent;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab.size (),
	   (long) htab.elements (),
	   htab.collisions ());
}

/* Dump SSA statistics on FILE.  */

static void
dump_dominator_optimization_stats (FILE *file,
				   hash_table<expr_elt_hasher> *avail_exprs)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
	   opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
	   opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}

/* Enter condition equivalence P into AVAIL_EXPRS_HASH.

   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p,
	     class avail_exprs_stack *avail_exprs_stack)
{
  class expr_hash_elt *element = new expr_hash_elt (&p->cond, p->value);
  expr_hash_elt **slot;

  hash_table<expr_elt_hasher> *avail_exprs = avail_exprs_stack->avail_exprs ();
  slot = avail_exprs->find_slot_with_hash (element, element->hash (), INSERT);
  if (*slot == NULL)
    {
      *slot = element;
      avail_exprs_stack->record_expr (element, NULL, '1');
    }
  else
    delete element;
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple *defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y, class const_and_copies *const_and_copies)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y, false))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
	 X.  This will preserve its single use properly and if this
	 conditional is eliminated, the computation of X can be
	 eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
	std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
	   /* ??? When threading over backedges the following is important
	      for correctness.  See PR61757.  */
	   || (loop_depth_of_name (x) < loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
	  || real_equal (&dconst0, &TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}

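/* As an illustration (ours, names invented): on the true edge of
   "if (x_1 == 7)" this is called with the operands of the EQ_EXPR and
   canonicalizes so the SSA name ends up as X and the invariant as Y,
   recording x_1 = 7; for "if (x_1 == y_2)" it prefers to keep the
   single-use name as X so that name's computation can die along with the
   conditional if the equivalence retires all of its uses.  */
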
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple *stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple *phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb,
			   class const_and_copies *const_and_copies)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
	continue;

      /* We may have an equivalence associated with this edge.  While
	 we can not propagate it into non-dominated blocks, we can
	 propagate them into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
	 table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

	 Don't bother with [01] = COND equivalences, they're not useful
	 here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;

	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    const_and_copies->record_const_or_copy (lhs, rhs);
	}

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  tree new_val;
	  use_operand_p orig_p;
	  tree orig_val;
	  gphi *phi = gsi.phi ();

	  /* The alternative may be associated with a constant, so verify
	     it is an SSA_NAME before doing anything with it.  */
	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
	  orig_val = get_use_from_ptr (orig_p);
	  if (TREE_CODE (orig_val) != SSA_NAME)
	    continue;

	  /* If we have *ORIG_P in our constant/copy table, then replace
	     ORIG_P with its value in our constant/copy table.  */
	  new_val = SSA_NAME_VALUE (orig_val);
	  if (new_val
	      && new_val != orig_val
	      && (TREE_CODE (new_val) == SSA_NAME
		  || is_gimple_min_invariant (new_val))
	      && may_propagate_copy (orig_val, new_val))
	    propagate_value (orig_p, new_val);
	}

      const_and_copies->pop_to_marker ();
    }
}

edge
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb, m_const_and_copies,
					  m_avail_exprs_stack);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  m_avail_exprs_stack->push_marker ();
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi, m_const_and_copies,
				      m_avail_exprs_stack);
  m_avail_exprs_stack->pop_to_marker ();

  edge taken_edge = NULL;
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    taken_edge
      = optimize_stmt (bb, gsi, m_const_and_copies, m_avail_exprs_stack);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb, m_const_and_copies);
  if (taken_edge && !dbg_cnt (dom_unreachable_edges))
    return NULL;

  return taken_edge;
}

/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  gimple *last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
	   && gimple_code (last) == GIMPLE_COND
	   && EDGE_COUNT (bb->succs) == 2
	   && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
	   && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
	 more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
	thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
	thread_across_edge (false_edge);
    }

  /* These remove expressions local to BB from the tables.  */
  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}

/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into AVAIL_EXPRS_STACK and
   CONST_AND_COPIES.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi,
				  class const_and_copies *const_and_copies,
				  class avail_exprs_stack *avail_exprs_stack)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
	 overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert, avail_exprs_stack);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
	const_and_copies->record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
	   || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
			   || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced redundant expr '");
	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
	  fprintf (dump_file, "' with '");
	  print_generic_expr (dump_file, cached_lhs, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      opt_stats.num_re++;

      if (assigns_var_p
	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
	cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
	 perhaps we should move this into propagate_tree_value_into_stmt
	 itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences into AVAIL_EXPRS_STACK.

   We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */

static void
record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
			       class avail_exprs_stack *avail_exprs_stack)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
	 may be propagated, register it in the CONST_AND_COPIES table.  We
	 do not need to record unwind data for this, since this is a true
	 assignment and not an equivalence inferred from a comparison.  All
	 uses of this ssa name are dominated by this assignment, so unwinding
	 just costs time and space.  */
      if (may_optimize_p
	  && (TREE_CODE (rhs) == SSA_NAME
	      || is_gimple_min_invariant (rhs)))
	{
	  rhs = dom_valueize (rhs);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "==== ASGN ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, " = ");
	      print_generic_expr (dump_file, rhs, 0);
	      fprintf (dump_file, "\n");
	    }

	  set_ssa_name_value (lhs, rhs);
	}
    }

  /* Make sure we can propagate &x + CST.  */
  if (lhs_code == SSA_NAME
      && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
    {
      tree op0 = gimple_assign_rhs1 (stmt);
      tree op1 = gimple_assign_rhs2 (stmt);
      tree new_rhs
	= build_fold_addr_expr (fold_build2 (MEM_REF,
					     TREE_TYPE (TREE_TYPE (op0)),
					     unshare_expr (op0),
					     fold_convert (ptr_type_node,
							   op1)));
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "==== ASGN ");
	  print_generic_expr (dump_file, lhs, 0);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, new_rhs, 0);
	  fprintf (dump_file, "\n");
	}

      set_ssa_name_value (lhs, new_rhs);
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gassign *new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
	{
	  /* NOTE tuples.  The call to gimple_build_assign below replaced
	     a call to build_gimple_modify_stmt, which did not set the
	     SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
	     may cause an SSA validation failure, as the LHS may be a
	     default-initialized name and should have no definition.  I'm
	     a bit dubious of this, as the artificial statement that we
	     generate here may in fact be ill-formed, but it is simply
	     used as an internal device in this pass, and never becomes
	     part of the CFG.  */
	  gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
	  new_stmt = gimple_build_assign (rhs, lhs);
	  SSA_NAME_DEF_STMT (rhs) = defstmt;
	}
      else
	new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
	 table.  */
      lookup_avail_expr (new_stmt, true, avail_exprs_stack);
    }
}

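/* A sketch of the memory-store case above (names invented): after

     *p_1 = x_2;

   the artificial statement "x_2 = *p_1" is entered into the available
   expression table with the store's VDEF as its VUSE, so a dominated
   load "y_3 = *p_1" can be found by lookup_avail_expr and replaced
   with x_2.  */
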
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

static void
cprop_operand (gimple *stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && !may_propagate_copy_into_asm (op))
	return;

      /* Certain operands are not allowed to be copy propagated due
	 to their interaction with exception handling and some GCC
	 extensions.  */
      if (!may_propagate_copy (op, val))
	return;

      /* Do not propagate copies into BIVs.
	 See PR23821 and PR62217 for how this can disturb IV and
	 number of iteration analysis.  */
      if (TREE_CODE (val) != INTEGER_CST)
	{
	  gimple *def = SSA_NAME_DEF_STMT (op);
	  if (gimple_code (def) == GIMPLE_PHI
	      && gimple_bb (def)->loop_father->header == gimple_bb (def))
	    return;
	}

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced '");
	  print_generic_expr (dump_file, op, dump_flags);
	  fprintf (dump_file, "' with %s '",
		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
	  print_generic_expr (dump_file, val, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      if (TREE_CODE (val) != SSA_NAME)
	opt_stats.num_const_prop++;
      else
	opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
	 safe, even if we changed virtual operands since we will
	 rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple *stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    cprop_operand (stmt, op_p);
}

/* Optimize the statement in block BB pointed to by iterator SI
   using equivalences from CONST_AND_COPIES and AVAIL_EXPRS_STACK.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */

static edge
optimize_stmt (basic_block bb, gimple_stmt_iterator si,
	       class const_and_copies *const_and_copies,
	       class avail_exprs_stack *avail_exprs_stack)
{
  gimple *stmt, *old_stmt;
  bool may_optimize_p;
  bool modified_p = false;
  bool was_noreturn;
  edge retval = NULL;

  old_stmt = stmt = gsi_stmt (si);
  was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (as_a <gcond *> (stmt));

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
	 up to date.  */
      if (fold_stmt (&si))
	{
	  stmt = gsi_stmt (si);
	  gimple_set_modified (stmt, true);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  Folded to: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	    }
	}

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
	rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
	rhs = gimple_goto_dest (stmt);
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
	/* This should never be an ADDR_EXPR.  */
	rhs = gimple_switch_index (swtch_stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
	 even if fold_stmt updated the stmt already and thus cleared
	 gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
		    && (is_gimple_assign (stmt)
			|| (is_gimple_call (stmt)
			    && gimple_call_lhs (stmt) != NULL_TREE)
			|| gimple_code (stmt) == GIMPLE_COND
			|| gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
	{
	  /* Resolve __builtin_constant_p.  If it hasn't been
	     folded to integer_one_node by now, it's fairly
	     certain that the value simply isn't constant.  */
	  tree callee = gimple_call_fndecl (stmt);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
	    {
	      propagate_tree_value_into_stmt (&si, integer_zero_node);
	      stmt = gsi_stmt (si);
	    }
	}

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si, const_and_copies,
					avail_exprs_stack);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree cached_lhs;
	  gassign *new_stmt;
	  rhs = dom_valueize (rhs);
	  /* Build a new statement with the RHS and LHS exchanged.  */
	  if (TREE_CODE (rhs) == SSA_NAME)
	    {
	      gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
	      new_stmt = gimple_build_assign (rhs, lhs);
	      SSA_NAME_DEF_STMT (rhs) = defstmt;
	    }
	  else
	    new_stmt = gimple_build_assign (rhs, lhs);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  cached_lhs = lookup_avail_expr (new_stmt, false, avail_exprs_stack);
	  if (cached_lhs
	      && rhs == cached_lhs)
	    {
	      basic_block bb = gimple_bb (stmt);
	      unlink_stmt_vdef (stmt);
	      if (gsi_remove (&si, true))
		{
		  bitmap_set_bit (need_eh_cleanup, bb->index);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "  Flagged to clear EH edges.\n");
		}
	      release_defs (stmt);
	      return retval;
	    }
	}
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p, avail_exprs_stack);

  /* If STMT is a COND_EXPR or SWITCH_EXPR and it was modified, then we may
     know where it goes.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
	val = fold_binary_loc (gimple_location (stmt),
			       gimple_cond_code (stmt), boolean_type_node,
			       gimple_cond_lhs (stmt),
			       gimple_cond_rhs (stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
	val = gimple_switch_index (swtch_stmt);

      if (val && TREE_CODE (val) == INTEGER_CST)
	{
	  retval = find_taken_edge (bb, val);
	  if (retval)
	    {
	      /* Fix the condition to be either true or false.  */
	      if (gimple_code (stmt) == GIMPLE_COND)
		{
		  if (integer_zerop (val))
		    gimple_cond_make_false (as_a <gcond *> (stmt));
		  else if (integer_onep (val))
		    gimple_cond_make_true (as_a <gcond *> (stmt));
		  else
		    gcc_unreachable ();

		  gimple_set_modified (stmt, true);
		}

	      /* Further simplifications may be possible.  */
	      cfg_altered = true;
	    }
	}

      /* If we simplified a statement in such a way as to be shown that it
	 cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	{
	  bitmap_set_bit (need_eh_cleanup, bb->index);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
	}

      if (!was_noreturn
	  && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
	need_noreturn_fixup.safe_push (stmt);
    }
  return retval;
}

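/* End-to-end sketch (our example, names invented): if cprop_into_stmt
   rewrites "if (a_1 < b_2)" into "if (3 < 7)", fold_binary_loc yields
   boolean_true_node, the condition is hardened with
   gimple_cond_make_true, and find_taken_edge returns the surviving edge
   so the dominator walk can treat the untaken arm as not executable.  */
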
/* Helper for walk_non_aliased_vuses.  Determine if we arrived at
   the desired memory state.  */

static void *
vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
{
  tree vuse2 = (tree) data;
  if (vuse1 == vuse2)
    return data;

  /* This bounds the stmt walks we perform on reference lookups
     to O(1) instead of O(N) where N is the number of dominating
     stores leading to a candidate.  We re-use the SCCVN param
     for this as it is basically the same complexity.  */
  if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
    return (void *) -1;

  return NULL;
}

/* Search for an existing instance of STMT in the AVAIL_EXPRS_STACK table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is also
   added to AVAIL_EXPRS_STACK, so that it can be removed when
   we finish processing this block and its children.  */

static tree
lookup_avail_expr (gimple *stmt, bool insert,
		   class avail_exprs_stack *avail_exprs_stack)
{
  expr_hash_elt **slot;
  tree lhs;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  class expr_hash_elt element (stmt, lhs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      element.print (dump_file);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr()->kind == EXPR_SINGLE
      && (TREE_CODE (element.expr()->ops.single.rhs) == SSA_NAME
	  || is_gimple_min_invariant (element.expr()->ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  hash_table<expr_elt_hasher> *avail_exprs = avail_exprs_stack->avail_exprs ();
  slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    {
      return NULL_TREE;
    }
  else if (*slot == NULL)
    {
      class expr_hash_elt *element2 = new expr_hash_elt (element);
      *slot = element2;

      avail_exprs_stack->record_expr (element2, NULL, '2');
      return NULL_TREE;
    }

  /* If we found a redundant memory operation do an alias walk to
     check if we can re-use it.  */
  if (gimple_vuse (stmt) != (*slot)->vop ())
    {
      tree vuse1 = (*slot)->vop ();
      tree vuse2 = gimple_vuse (stmt);
      /* If we have a load of a register and a candidate in the
	 hash with vuse1 then try to reach its stmt by walking
	 up the virtual use-def chain using walk_non_aliased_vuses.
	 But don't do this when removing expressions from the hash.  */
      ao_ref ref;
      if (!(vuse1 && vuse2
	    && gimple_assign_single_p (stmt)
	    && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
	    && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
	    && walk_non_aliased_vuses (&ref, vuse2,
				       vuse_eq, NULL, NULL, vuse1) != NULL))
	{
	  if (insert)
	    {
	      class expr_hash_elt *element2 = new expr_hash_elt (element);

	      /* Insert the expr into the hash by replacing the current
		 entry and recording the value to restore in the
		 avail_exprs_stack.  */
	      avail_exprs_stack->record_expr (element2, *slot, '2');
	      *slot = element2;
	    }
	  return NULL_TREE;
	}
    }

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = (*slot)->lhs ();

  lhs = dom_valueize (lhs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}