/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "inchash.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op; tree opnd; } unary;
    struct { enum tree_code op; tree opnd0, opnd1; } binary;
    struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
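
/* As an illustrative sketch, an assignment such as

     x_3 = y_1 + z_2;

   is represented with KIND == EXPR_BINARY, TYPE == the type of the
   statement's LHS, ops.binary.op == PLUS_EXPR and
   ops.binary.opnd0/opnd1 == y_1/z_2.  The LHS itself is recorded
   separately, in the enclosing expr_hash_elt below.  */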

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
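
/* For example, on the true edge out of

     if (x_1 < y_2)

   we record a cond_equivalence whose COND represents "x_1 < y_2"
   and whose VALUE is boolean_true_node.  */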

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHSs have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;
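
/* A small sketch of how AVAIL_EXPRS is used.  Given

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the first statement makes "b_2 + c_3" available with value a_1;
   when the second statement is processed, the lookup succeeds and
   d_4 can be replaced by a_1.  Every entry inserted here is also
   pushed onto AVAIL_EXPRS_STACK so that it can be popped (up to the
   NULL marker) when we leave the block that made it available.  */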

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
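
/* As a sketch of the stack layout: recording the equivalence x_1 == 5
   pushes the previous value of x_1 (possibly NULL) followed by x_1
   itself, so restore_vars_to_original_value pops entries in pairs;
   a single NULL_TREE entry marks the end of the pairs belonging to
   the current block.  */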

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);


/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          if (CONVERT_EXPR_CODE_P (subcode))
            subcode = NOP_EXPR;
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If exactly one of the types is NULL, the expressions cannot
     be equal.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
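
/* As a consequence of the add_commutative combination above, hashing
   the pair (t1, t2) and the pair (t2, t1) yields the same value, so
   e.g. "a_1 + b_2" and "b_2 + a_1" fall into the same hash bucket,
   mirroring the symmetric matching done in hashable_expr_equal_p.  */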

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gcall *fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, " ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future so that
     LOOPS_HAVE_PREHEADERS is no longer needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

/* Remove entries from the expression hash table until we hit the NULL
   marker that delimits the expressions made available in the current
   block, popping each victim from AVAIL_EXPRS_STACK as we go.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as VICTIM and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs->find_slot (victim, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs->clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
      gimple_build_cond (NE_EXPR,
                         integer_zero_node, integer_zero_node,
                         NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
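
/* For example, given

     x_3 = PHI <y_1 (bb1), y_1 (bb2), x_3 (bb3)>

   the alternative identical to the result is ignored, the remaining
   alternatives are all y_1, and so x_3 can be treated as equivalent
   to y_1.  */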

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
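          /* E.g., if C_1 has type char and was widened via
             X_2 = (int) C_1, and this edge implies X_2 == 5, then 5
             fits in char and we can also record C_1 == (char) 5.
             (X_2 and C_1 are illustrative names.)  */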
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements: %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, " avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}


/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it onto the
   vector *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */
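
/* As a concrete sketch, for "a < b" on integral operands this records
   "a <= b" and "a != b" as true, followed by "a < b" itself as true
   and its inversion as false; for floating point operands
   "ORDERED (a, b)" and "LTGT (a, b)" are recorded as true as well.  */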

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now record the original true and false conditions themselves.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ??? When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */
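
/* For instance, if the table currently maps x_1 to the constant 5 and
   a successor of BB has

     y_2 = PHI <x_1 (BB), ...>

   then the x_1 argument of that PHI is replaced with 5, even though
   the successor itself may not be dominated by BB.  */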

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

1863 /* Special case comparing booleans against a constant as we
1864 know the value of OP0 on both arms of the branch; i.e., we
1865 can record an equivalence for OP0 rather than COND. */
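/* E.g. (an illustrative sketch): for "if (b_1 == 0)" the true edge
   records b_1 = false and the false edge records b_1 = true.  */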
1866 if ((code == EQ_EXPR || code == NE_EXPR)
1867 && TREE_CODE (op0) == SSA_NAME
1868 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1869 && is_gimple_min_invariant (op1))
1870 {
1871 if (code == EQ_EXPR)
1872 {
1873 edge_info = allocate_edge_info (true_edge);
1874 edge_info->lhs = op0;
1875 edge_info->rhs = (integer_zerop (op1)
1876 ? boolean_false_node
1877 : boolean_true_node);
1878
1879 edge_info = allocate_edge_info (false_edge);
1880 edge_info->lhs = op0;
1881 edge_info->rhs = (integer_zerop (op1)
1882 ? boolean_true_node
1883 : boolean_false_node);
1884 }
1885 else
1886 {
1887 edge_info = allocate_edge_info (true_edge);
1888 edge_info->lhs = op0;
1889 edge_info->rhs = (integer_zerop (op1)
1890 ? boolean_true_node
1891 : boolean_false_node);
1892
1893 edge_info = allocate_edge_info (false_edge);
1894 edge_info->lhs = op0;
1895 edge_info->rhs = (integer_zerop (op1)
1896 ? boolean_false_node
1897 : boolean_true_node);
1898 }
1899 }
1900 else if (is_gimple_min_invariant (op0)
1901 && (TREE_CODE (op1) == SSA_NAME
1902 || is_gimple_min_invariant (op1)))
1903 {
1904 tree cond = build2 (code, boolean_type_node, op0, op1);
1905 tree inverted = invert_truthvalue_loc (loc, cond);
1906 bool can_infer_simple_equiv
1907 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
1908 && real_zerop (op0));
1909 struct edge_info *edge_info;
1910
1911 edge_info = allocate_edge_info (true_edge);
1912 record_conditions (edge_info, cond, inverted);
1913
1914 if (can_infer_simple_equiv && code == EQ_EXPR)
1915 {
1916 edge_info->lhs = op1;
1917 edge_info->rhs = op0;
1918 }
1919
1920 edge_info = allocate_edge_info (false_edge);
1921 record_conditions (edge_info, inverted, cond);
1922
1923 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1924 {
1925 edge_info->lhs = op1;
1926 edge_info->rhs = op0;
1927 }
1928 }
1929
1930 else if (TREE_CODE (op0) == SSA_NAME
1931 && (TREE_CODE (op1) == SSA_NAME
1932 || is_gimple_min_invariant (op1)))
1933 {
1934 tree cond = build2 (code, boolean_type_node, op0, op1);
1935 tree inverted = invert_truthvalue_loc (loc, cond);
1936 bool can_infer_simple_equiv
1937 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
1938 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1939 struct edge_info *edge_info;
1940
1941 edge_info = allocate_edge_info (true_edge);
1942 record_conditions (edge_info, cond, inverted);
1943
1944 if (can_infer_simple_equiv && code == EQ_EXPR)
1945 {
1946 edge_info->lhs = op0;
1947 edge_info->rhs = op1;
1948 }
1949
1950 edge_info = allocate_edge_info (false_edge);
1951 record_conditions (edge_info, inverted, cond);
1952
1953 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1954 {
1955 edge_info->lhs = op0;
1956 edge_info->rhs = op1;
1957 }
1958 }
1959 }
1960
1961 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1962 }
1963 }
1964
1965 void
1966 dom_opt_dom_walker::before_dom_children (basic_block bb)
1967 {
1968 gimple_stmt_iterator gsi;
1969
1970 if (dump_file && (dump_flags & TDF_DETAILS))
1971 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1972
1973 /* Push a marker on the stacks of local information so that we know how
1974 far to unwind when we finalize this block. */
1975 avail_exprs_stack.safe_push (NULL);
1976 const_and_copies_stack.safe_push (NULL_TREE);
1977
1978 record_equivalences_from_incoming_edge (bb);
1979
1980 /* PHI nodes can create equivalences too. */
1981 record_equivalences_from_phis (bb);
1982
1983 /* Create equivalences from redundant PHIs. PHIs are only truly
1984 redundant when they exist in the same block, so push another
1985 marker and unwind right afterwards. */
1986 avail_exprs_stack.safe_push (NULL);
1987 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1988 eliminate_redundant_computations (&gsi);
1989 remove_local_expressions_from_table ();
1990
1991 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1992 optimize_stmt (bb, gsi);
1993
1994 /* Now prepare to process dominated blocks. */
1995 record_edge_info (bb);
1996 cprop_into_successor_phis (bb);
1997 }
1998
1999 /* We have finished processing the dominator children of BB; perform
2000 any finalization actions in preparation for leaving this node in
2001 the dominator tree. */
2002
2003 void
2004 dom_opt_dom_walker::after_dom_children (basic_block bb)
2005 {
2006 gimple last;
2007
2008 /* If we have an outgoing edge to a block with multiple incoming and
2009 outgoing edges, then we may be able to thread the edge, i.e., we
2010 may be able to statically determine which of the outgoing edges
2011 will be traversed when the incoming edge from BB is traversed. */
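/* A hedged sketch of the idea (names invented): if BB establishes
   x_1 = 0 and its single successor ends in "if (x_1 == 0)", then any
   path through BB must take the true edge, so the jump can be
   threaded past the conditional.  */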
2012 if (single_succ_p (bb)
2013 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2014 && potentially_threadable_block (single_succ (bb)))
2015 {
2016 thread_across_edge (single_succ_edge (bb));
2017 }
2018 else if ((last = last_stmt (bb))
2019 && gimple_code (last) == GIMPLE_COND
2020 && EDGE_COUNT (bb->succs) == 2
2021 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2022 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2023 {
2024 edge true_edge, false_edge;
2025
2026 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2027
2028 /* Only try to thread the edge if it reaches a target block with
2029 more than one predecessor and more than one successor. */
2030 if (potentially_threadable_block (true_edge->dest))
2031 thread_across_edge (true_edge);
2032
2033 /* Similarly for the ELSE arm. */
2034 if (potentially_threadable_block (false_edge->dest))
2035 thread_across_edge (false_edge);
2036
2037 }
2038
2039 /* These remove expressions local to BB from the tables. */
2040 remove_local_expressions_from_table ();
2041 restore_vars_to_original_value ();
2042 }
2043
2044 /* Search for redundant computations in STMT. If any are found, then
2045 replace them with the variable holding the result of the computation.
2046
2047 If safe, record this expression into the available expression hash
2048 table. */
2049
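/* For example (an illustrative sketch; SSA names are made up), given

     a_2 = b_1 + c_3;
     ...
     d_4 = b_1 + c_3;

   the second computation hits in the expression hash table and the
   statement is rewritten as d_4 = a_2.  */
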
2050 static void
2051 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2052 {
2053 tree expr_type;
2054 tree cached_lhs;
2055 tree def;
2056 bool insert = true;
2057 bool assigns_var_p = false;
2058
2059 gimple stmt = gsi_stmt (*gsi);
2060
2061 if (gimple_code (stmt) == GIMPLE_PHI)
2062 def = gimple_phi_result (stmt);
2063 else
2064 def = gimple_get_lhs (stmt);
2065
2066 /* Certain expressions on the RHS can be optimized away, but cannot
2067 themselves be entered into the hash tables. */
2068 if (! def
2069 || TREE_CODE (def) != SSA_NAME
2070 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2071 || gimple_vdef (stmt)
2072 /* Do not record equivalences for increments of ivs. This would create
2073 overlapping live ranges for a very questionable gain. */
2074 || simple_iv_increment_p (stmt))
2075 insert = false;
2076
2077 /* Check if the expression has been computed before. */
2078 cached_lhs = lookup_avail_expr (stmt, insert);
2079
2080 opt_stats.num_exprs_considered++;
2081
2082 /* Get the type of the expression we are trying to optimize. */
2083 if (is_gimple_assign (stmt))
2084 {
2085 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2086 assigns_var_p = true;
2087 }
2088 else if (gimple_code (stmt) == GIMPLE_COND)
2089 expr_type = boolean_type_node;
2090 else if (is_gimple_call (stmt))
2091 {
2092 gcc_assert (gimple_call_lhs (stmt));
2093 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2094 assigns_var_p = true;
2095 }
2096 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2097 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2098 else if (gimple_code (stmt) == GIMPLE_PHI)
2099 /* We can't propagate into a phi, so the logic below doesn't apply.
2100 Instead record an equivalence between the cached LHS and the
2101 PHI result of this statement, provided they are in the same block.
2102 This should be sufficient to kill the redundant phi. */
2103 {
2104 if (def && cached_lhs)
2105 record_const_or_copy (def, cached_lhs);
2106 return;
2107 }
2108 else
2109 gcc_unreachable ();
2110
2111 if (!cached_lhs)
2112 return;
2113
2114 /* It is safe to ignore types here since we have already done
2115 type checking in the hashing and equality routines. In fact
2116 type checking here merely gets in the way of constant
2117 propagation. Also, make sure that it is safe to propagate
2118 CACHED_LHS into the expression in STMT. */
2119 if ((TREE_CODE (cached_lhs) != SSA_NAME
2120 && (assigns_var_p
2121 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2122 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2123 {
2124 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2125 || is_gimple_min_invariant (cached_lhs));
2126
2127 if (dump_file && (dump_flags & TDF_DETAILS))
2128 {
2129 fprintf (dump_file, " Replaced redundant expr '");
2130 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2131 fprintf (dump_file, "' with '");
2132 print_generic_expr (dump_file, cached_lhs, dump_flags);
2133 fprintf (dump_file, "'\n");
2134 }
2135
2136 opt_stats.num_re++;
2137
2138 if (assigns_var_p
2139 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2140 cached_lhs = fold_convert (expr_type, cached_lhs);
2141
2142 propagate_tree_value_into_stmt (gsi, cached_lhs);
2143
2144 /* Since it is always necessary to mark the result as modified,
2145 perhaps we should move this into propagate_tree_value_into_stmt
2146 itself. */
2147 gimple_set_modified (gsi_stmt (*gsi), true);
2148 }
2149 }
2150
2151 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2152 the available expressions table or the const_and_copies table.
2153 Detect and record those equivalences. */
2154 /* We handle only very simple copy equivalences here. The heavy
2155 lifting is done by eliminate_redundant_computations. */
2156
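/* A hedged sketch of the store equivalence recorded below: after

     *p_1 = x_2;

   the artificial statement "x_2 = *p_1" is entered into the available
   expression table, so a later load "y_3 = *p_1" with the same virtual
   operand can be replaced by y_3 = x_2.  */
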
2157 static void
2158 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2159 {
2160 tree lhs;
2161 enum tree_code lhs_code;
2162
2163 gcc_assert (is_gimple_assign (stmt));
2164
2165 lhs = gimple_assign_lhs (stmt);
2166 lhs_code = TREE_CODE (lhs);
2167
2168 if (lhs_code == SSA_NAME
2169 && gimple_assign_single_p (stmt))
2170 {
2171 tree rhs = gimple_assign_rhs1 (stmt);
2172
2173 /* If the RHS of the assignment is a constant or another variable that
2174 may be propagated, register it in the CONST_AND_COPIES table. We
2175 do not need to record unwind data for this, since this is a true
2176 assignment and not an equivalence inferred from a comparison. All
2177 uses of this ssa name are dominated by this assignment, so unwinding
2178 just costs time and space. */
2179 if (may_optimize_p
2180 && (TREE_CODE (rhs) == SSA_NAME
2181 || is_gimple_min_invariant (rhs)))
2182 {
2183 if (dump_file && (dump_flags & TDF_DETAILS))
2184 {
2185 fprintf (dump_file, "==== ASGN ");
2186 print_generic_expr (dump_file, lhs, 0);
2187 fprintf (dump_file, " = ");
2188 print_generic_expr (dump_file, rhs, 0);
2189 fprintf (dump_file, "\n");
2190 }
2191
2192 set_ssa_name_value (lhs, rhs);
2193 }
2194 }
2195
2196 /* A memory store, even an aliased store, creates a useful
2197 equivalence. By exchanging the LHS and RHS, creating suitable
2198 vops and recording the result in the available expression table,
2199 we may be able to expose more redundant loads. */
2200 if (!gimple_has_volatile_ops (stmt)
2201 && gimple_references_memory_p (stmt)
2202 && gimple_assign_single_p (stmt)
2203 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2204 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2205 && !is_gimple_reg (lhs))
2206 {
2207 tree rhs = gimple_assign_rhs1 (stmt);
2208 gassign *new_stmt;
2209
2210 /* Build a new statement with the RHS and LHS exchanged. */
2211 if (TREE_CODE (rhs) == SSA_NAME)
2212 {
2213 /* NOTE tuples. The call to gimple_build_assign below replaced
2214 a call to build_gimple_modify_stmt, which did not set the
2215 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2216 may cause an SSA validation failure, as the LHS may be a
2217 default-initialized name and should have no definition. I'm
2218 a bit dubious of this, as the artificial statement that we
2219 generate here may in fact be ill-formed, but it is simply
2220 used as an internal device in this pass, and never becomes
2221 part of the CFG. */
2222 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2223 new_stmt = gimple_build_assign (rhs, lhs);
2224 SSA_NAME_DEF_STMT (rhs) = defstmt;
2225 }
2226 else
2227 new_stmt = gimple_build_assign (rhs, lhs);
2228
2229 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2230
2231 /* Finally enter the statement into the available expression
2232 table. */
2233 lookup_avail_expr (new_stmt, true);
2234 }
2235 }
2236
2237 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2238 CONST_AND_COPIES. */
2239
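/* E.g. (an illustrative sketch): if the const/copy table records
   x_1 = 5, a use of x_1 in "y_2 = x_1 + 1" is rewritten to
   "y_2 = 5 + 1", which the caller may subsequently fold.  */
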
2240 static void
2241 cprop_operand (gimple stmt, use_operand_p op_p)
2242 {
2243 tree val;
2244 tree op = USE_FROM_PTR (op_p);
2245
2246 /* If the operand has a known constant value or it is known to be a
2247 copy of some other variable, use the value or copy stored in
2248 CONST_AND_COPIES. */
2249 val = SSA_NAME_VALUE (op);
2250 if (val && val != op)
2251 {
2252 /* Do not replace hard register operands in asm statements. */
2253 if (gimple_code (stmt) == GIMPLE_ASM
2254 && !may_propagate_copy_into_asm (op))
2255 return;
2256
2257 /* Certain operands are not allowed to be copy propagated due
2258 to their interaction with exception handling and some GCC
2259 extensions. */
2260 if (!may_propagate_copy (op, val))
2261 return;
2262
2263 /* Do not propagate copies into simple IV increment statements.
2264 See PR23821 for how this can disturb IV analysis. */
2265 if (TREE_CODE (val) != INTEGER_CST
2266 && simple_iv_increment_p (stmt))
2267 return;
2268
2269 /* Dump details. */
2270 if (dump_file && (dump_flags & TDF_DETAILS))
2271 {
2272 fprintf (dump_file, " Replaced '");
2273 print_generic_expr (dump_file, op, dump_flags);
2274 fprintf (dump_file, "' with %s '",
2275 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2276 print_generic_expr (dump_file, val, dump_flags);
2277 fprintf (dump_file, "'\n");
2278 }
2279
2280 if (TREE_CODE (val) != SSA_NAME)
2281 opt_stats.num_const_prop++;
2282 else
2283 opt_stats.num_copy_prop++;
2284
2285 propagate_value (op_p, val);
2286
2287 /* And note that we modified this statement. This is now
2288 safe, even if we changed virtual operands since we will
2289 rescan the statement and rewrite its operands again. */
2290 gimple_set_modified (stmt, true);
2291 }
2292 }
2293
2294 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2295 known value for that SSA_NAME (or NULL if no value is known).
2296
2297 Propagate values from CONST_AND_COPIES into the uses, vuses and
2298 vdef_ops of STMT. */
2299
2300 static void
2301 cprop_into_stmt (gimple stmt)
2302 {
2303 use_operand_p op_p;
2304 ssa_op_iter iter;
2305
2306 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2307 cprop_operand (stmt, op_p);
2308 }
2309
2310 /* Optimize the statement pointed to by iterator SI.
2311
2312 We try to perform some simplistic global redundancy elimination and
2313 constant propagation:
2314
2315 1- To detect global redundancy, we keep track of expressions that have
2316 been computed in this block and its dominators. If we find that the
2317 same expression is computed more than once, we eliminate repeated
2318 computations by using the target of the first one.
2319
2320 2- Constant values and copy assignments. This is used to do very
2321 simplistic constant and copy propagation. When a constant or copy
2322 assignment is found, we map the value on the RHS of the assignment to
2323 the variable in the LHS in the CONST_AND_COPIES table. */
2324
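/* Putting the two together, a minimal sketch (names invented): given
   "x_1 = 5;" followed by "y_2 = x_1 + 3;", const propagation rewrites
   the second statement to "y_2 = 5 + 3", which folding then reduces
   to "y_2 = 8".  */
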
2325 static void
2326 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2327 {
2328 gimple stmt, old_stmt;
2329 bool may_optimize_p;
2330 bool modified_p = false;
2331
2332 old_stmt = stmt = gsi_stmt (si);
2333
2334 if (dump_file && (dump_flags & TDF_DETAILS))
2335 {
2336 fprintf (dump_file, "Optimizing statement ");
2337 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2338 }
2339
2340 if (gimple_code (stmt) == GIMPLE_COND)
2341 canonicalize_comparison (as_a <gcond *> (stmt));
2342
2343 update_stmt_if_modified (stmt);
2344 opt_stats.num_stmts++;
2345
2346 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2347 cprop_into_stmt (stmt);
2348
2349 /* If the statement has been modified with constant replacements,
2350 fold its RHS before checking for redundant computations. */
2351 if (gimple_modified_p (stmt))
2352 {
2353 tree rhs = NULL;
2354
2355 /* Try to fold the statement making sure that STMT is kept
2356 up to date. */
2357 if (fold_stmt (&si))
2358 {
2359 stmt = gsi_stmt (si);
2360 gimple_set_modified (stmt, true);
2361
2362 if (dump_file && (dump_flags & TDF_DETAILS))
2363 {
2364 fprintf (dump_file, " Folded to: ");
2365 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2366 }
2367 }
2368
2369 /* We only need to consider cases that can yield a gimple operand. */
2370 if (gimple_assign_single_p (stmt))
2371 rhs = gimple_assign_rhs1 (stmt);
2372 else if (gimple_code (stmt) == GIMPLE_GOTO)
2373 rhs = gimple_goto_dest (stmt);
2374 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2375 /* This should never be an ADDR_EXPR. */
2376 rhs = gimple_switch_index (swtch_stmt);
2377
2378 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2379 recompute_tree_invariant_for_addr_expr (rhs);
2380
2381 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2382 even if fold_stmt updated the stmt already and thus cleared
2383 gimple_modified_p flag on it. */
2384 modified_p = true;
2385 }
2386
2387 /* Check for redundant computations. Do this optimization only for
2388 side-effect free assignments, calls with a LHS, conditionals and switches. */
2389 may_optimize_p = (!gimple_has_side_effects (stmt)
2390 && (is_gimple_assign (stmt)
2391 || (is_gimple_call (stmt)
2392 && gimple_call_lhs (stmt) != NULL_TREE)
2393 || gimple_code (stmt) == GIMPLE_COND
2394 || gimple_code (stmt) == GIMPLE_SWITCH));
2395
2396 if (may_optimize_p)
2397 {
2398 if (gimple_code (stmt) == GIMPLE_CALL)
2399 {
2400 /* Resolve __builtin_constant_p. If it hasn't been
2401 folded to integer_one_node by now, it's fairly
2402 certain that the value simply isn't constant. */
2403 tree callee = gimple_call_fndecl (stmt);
2404 if (callee
2405 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2406 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2407 {
2408 propagate_tree_value_into_stmt (&si, integer_zero_node);
2409 stmt = gsi_stmt (si);
2410 }
2411 }
2412
2413 update_stmt_if_modified (stmt);
2414 eliminate_redundant_computations (&si);
2415 stmt = gsi_stmt (si);
2416
2417 /* Perform simple redundant store elimination. */
2418 if (gimple_assign_single_p (stmt)
2419 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2420 {
2421 tree lhs = gimple_assign_lhs (stmt);
2422 tree rhs = gimple_assign_rhs1 (stmt);
2423 tree cached_lhs;
2424 gassign *new_stmt;
2425 if (TREE_CODE (rhs) == SSA_NAME)
2426 {
2427 tree tem = SSA_NAME_VALUE (rhs);
2428 if (tem)
2429 rhs = tem;
2430 }
2431 /* Build a new statement with the RHS and LHS exchanged. */
2432 if (TREE_CODE (rhs) == SSA_NAME)
2433 {
2434 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2435 new_stmt = gimple_build_assign (rhs, lhs);
2436 SSA_NAME_DEF_STMT (rhs) = defstmt;
2437 }
2438 else
2439 new_stmt = gimple_build_assign (rhs, lhs);
2440 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2441 cached_lhs = lookup_avail_expr (new_stmt, false);
2442 if (cached_lhs
2443 && rhs == cached_lhs)
2444 {
2445 basic_block bb = gimple_bb (stmt);
2446 unlink_stmt_vdef (stmt);
2447 if (gsi_remove (&si, true))
2448 {
2449 bitmap_set_bit (need_eh_cleanup, bb->index);
2450 if (dump_file && (dump_flags & TDF_DETAILS))
2451 fprintf (dump_file, " Flagged to clear EH edges.\n");
2452 }
2453 release_defs (stmt);
2454 return;
2455 }
2456 }
2457 }
2458
2459 /* Record any additional equivalences created by this statement. */
2460 if (is_gimple_assign (stmt))
2461 record_equivalences_from_stmt (stmt, may_optimize_p);
2462
2463 /* If STMT is a COND_EXPR and it was modified, then we may know
2464 where it goes. If that is the case, then mark the CFG as altered.
2465
2466 This will cause us to later call remove_unreachable_blocks and
2467 cleanup_tree_cfg when it is safe to do so. It is not safe to
2468 clean things up here since removal of edges and such can trigger
2469 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2470 the manager.
2471
2472 That's all fine and good, except that once SSA_NAMEs are released
2473 to the manager, we must not call create_ssa_name until all references
2474 to released SSA_NAMEs have been eliminated.
2475
2476 All references to the deleted SSA_NAMEs cannot be eliminated until
2477 we remove unreachable blocks.
2478
2479 We cannot remove unreachable blocks until after we have completed
2480 any queued jump threading.
2481
2482 We cannot complete any queued jump threads until we have taken
2483 appropriate variables out of SSA form. Taking variables out of
2484 SSA form can call create_ssa_name and thus we lose.
2485
2486 Ultimately I suspect we're going to need to change the interface
2487 into the SSA_NAME manager. */
2488 if (gimple_modified_p (stmt) || modified_p)
2489 {
2490 tree val = NULL;
2491
2492 update_stmt_if_modified (stmt);
2493
2494 if (gimple_code (stmt) == GIMPLE_COND)
2495 val = fold_binary_loc (gimple_location (stmt),
2496 gimple_cond_code (stmt), boolean_type_node,
2497 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2498 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2499 val = gimple_switch_index (swtch_stmt);
2500
2501 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2502 cfg_altered = true;
2503
2504 /* If we simplified a statement in such a way that it can be shown
2505 not to trap, update the eh information and the cfg to match. */
2506 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2507 {
2508 bitmap_set_bit (need_eh_cleanup, bb->index);
2509 if (dump_file && (dump_flags & TDF_DETAILS))
2510 fprintf (dump_file, " Flagged to clear EH edges.\n");
2511 }
2512 }
2513 }
2514
2515 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2516 If found, return its LHS. Otherwise insert STMT in the table and
2517 return NULL_TREE.
2518
2519 Also, when an expression is first inserted in the table, it is
2520 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2521 we finish processing this block and its children. */
2522
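/* A hedged example of the protocol (SSA names are made up): for
   "a_2 = b_1 + c_3" the first lookup (with INSERT) misses, records
   the expression and returns NULL_TREE; a later lookup for
   "d_4 = b_1 + c_3" hits and returns a_2, the LHS of the first
   instance.  */
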
2523 static tree
2524 lookup_avail_expr (gimple stmt, bool insert)
2525 {
2526 expr_hash_elt **slot;
2527 tree lhs;
2528 tree temp;
2529 struct expr_hash_elt element;
2530
2531 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2532 if (gimple_code (stmt) == GIMPLE_PHI)
2533 lhs = gimple_phi_result (stmt);
2534 else
2535 lhs = gimple_get_lhs (stmt);
2536
2537 initialize_hash_element (stmt, lhs, &element);
2538
2539 if (dump_file && (dump_flags & TDF_DETAILS))
2540 {
2541 fprintf (dump_file, "LKUP ");
2542 print_expr_hash_elt (dump_file, &element);
2543 }
2544
2545 /* Don't bother remembering constant assignments and copy operations.
2546 Constants and copy operations are handled by the constant/copy propagator
2547 in optimize_stmt. */
2548 if (element.expr.kind == EXPR_SINGLE
2549 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2550 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2551 return NULL_TREE;
2552
2553 /* Finally try to find the expression in the main expression hash table. */
2554 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2555 if (slot == NULL)
2556 {
2557 free_expr_hash_elt_contents (&element);
2558 return NULL_TREE;
2559 }
2560 else if (*slot == NULL)
2561 {
2562 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2563 *element2 = element;
2564 element2->stamp = element2;
2565 *slot = element2;
2566
2567 if (dump_file && (dump_flags & TDF_DETAILS))
2568 {
2569 fprintf (dump_file, "2>>> ");
2570 print_expr_hash_elt (dump_file, element2);
2571 }
2572
2573 avail_exprs_stack.safe_push (element2);
2574 return NULL_TREE;
2575 }
2576 else
2577 free_expr_hash_elt_contents (&element);
2578
2579 /* Extract the LHS of the assignment so that it can be used as the current
2580 definition of another variable. */
2581 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2582
2583 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2584 use the value from the const_and_copies table. */
2585 if (TREE_CODE (lhs) == SSA_NAME)
2586 {
2587 temp = SSA_NAME_VALUE (lhs);
2588 if (temp)
2589 lhs = temp;
2590 }
2591
2592 if (dump_file && (dump_flags & TDF_DETAILS))
2593 {
2594 fprintf (dump_file, "FIND: ");
2595 print_generic_expr (dump_file, lhs, 0);
2596 fprintf (dump_file, "\n");
2597 }
2598
2599 return lhs;
2600 }
2601
2602 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2603 for expressions using the code of the expression and the SSA numbers of
2604 its operands. */
2605
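/* Why the vuse participates in the hash (an illustrative sketch):

     x_2 = *p_1;   # VUSE .MEM_5
     *p_1 = y_3;   # VDEF .MEM_6
     z_4 = *p_1;   # VUSE .MEM_6

   the two loads must hash differently, since the intervening store
   changes the virtual operand and possibly the loaded value.  */
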
2606 static hashval_t
2607 avail_expr_hash (const void *p)
2608 {
2609 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2610 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2611 tree vuse;
2612 inchash::hash hstate;
2613
2614 inchash::add_hashable_expr (expr, hstate);
2615
2616 /* If the hash table entry is not associated with a statement, then we
2617 can just hash the expression and not worry about virtual operands
2618 and such. */
2619 if (!stmt)
2620 return hstate.end ();
2621
2622 /* Add the SSA version number of the vuse operand. This is important
2623 because compound variables like arrays are not renamed in the
2624 operands. Rather, the rename is done on the virtual variable
2625 representing all the elements of the array. */
2626 if ((vuse = gimple_vuse (stmt)))
2627 inchash::add_expr (vuse, hstate);
2628
2629 return hstate.end ();
2630 }
2631
2632 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2633 up degenerate PHIs created by or exposed by jump threading. */
2634
2635 /* Given a statement STMT, which is either a PHI node or an assignment,
2636 remove it from the IL. */
2637
2638 static void
2639 remove_stmt_or_phi (gimple stmt)
2640 {
2641 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2642
2643 if (gimple_code (stmt) == GIMPLE_PHI)
2644 remove_phi_node (&gsi, true);
2645 else
2646 {
2647 gsi_remove (&gsi, true);
2648 release_defs (stmt);
2649 }
2650 }
2651
2652 /* Given a statement STMT, which is either a PHI node or an assignment,
2653 return the "rhs" of the node. In the case of a non-degenerate
2654 PHI, NULL is returned. */
2655
2656 static tree
2657 get_rhs_or_phi_arg (gimple stmt)
2658 {
2659 if (gimple_code (stmt) == GIMPLE_PHI)
2660 return degenerate_phi_result (as_a <gphi *> (stmt));
2661 else if (gimple_assign_single_p (stmt))
2662 return gimple_assign_rhs1 (stmt);
2663 else
2664 gcc_unreachable ();
2665 }
2666
2667
2668 /* Given a statement STMT, which is either a PHI node or an assignment,
2669 return the "lhs" of the node. */
2670
2671 static tree
2672 get_lhs_or_phi_result (gimple stmt)
2673 {
2674 if (gimple_code (stmt) == GIMPLE_PHI)
2675 return gimple_phi_result (stmt);
2676 else if (is_gimple_assign (stmt))
2677 return gimple_assign_lhs (stmt);
2678 else
2679 gcc_unreachable ();
2680 }
2681
2682 /* Propagate RHS into all uses of LHS (when possible).
2683
2684 RHS and LHS are derived from STMT, which is passed in solely so
2685 that we can remove it if propagation is successful.
2686
2687 When propagating into a PHI node or into a statement which turns
2688 into a trivial copy or constant initialization, set the
2689 appropriate bit in INTERESTING_NAMEs so that we will visit those
2690 nodes as well in an effort to pick up secondary optimization
2691 opportunities. */
2692
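/* A minimal sketch of the effect (names invented): for "x_2 = y_1;"
   every use of x_2 is replaced by y_1, and once all uses are gone the
   copy itself is removed.  If one of those uses was a PHI argument,
   the PHI may now be degenerate and is queued via INTERESTING_NAMES.  */
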
2693 static void
2694 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2695 {
2696 /* First verify that propagation is valid. */
2697 if (may_propagate_copy (lhs, rhs))
2698 {
2699 use_operand_p use_p;
2700 imm_use_iterator iter;
2701 gimple use_stmt;
2702 bool all = true;
2703
2704 /* Dump details. */
2705 if (dump_file && (dump_flags & TDF_DETAILS))
2706 {
2707 fprintf (dump_file, " Replacing '");
2708 print_generic_expr (dump_file, lhs, dump_flags);
2709 fprintf (dump_file, "' with %s '",
2710 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2711 print_generic_expr (dump_file, rhs, dump_flags);
2712 fprintf (dump_file, "'\n");
2713 }
2714
2715 /* Walk over every use of LHS and try to replace the use with RHS.
2716 At this point the only reason why such a propagation would not
2717 be successful would be if the use occurs in an ASM_EXPR. */
2718 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2719 {
2720 /* Leave debug stmts alone. If we succeed in propagating
2721 all non-debug uses, we'll drop the DEF, and propagation
2722 into debug stmts will occur then. */
2723 if (gimple_debug_bind_p (use_stmt))
2724 continue;
2725
2726 /* It's not always safe to propagate into an ASM_EXPR. */
2727 if (gimple_code (use_stmt) == GIMPLE_ASM
2728 && ! may_propagate_copy_into_asm (lhs))
2729 {
2730 all = false;
2731 continue;
2732 }
2733
2734 /* It's not ok to propagate into the definition stmt of RHS.
2735 <bb 9>:
2736 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2737 g_67.1_6 = prephitmp.12_36;
2738 goto <bb 9>;
2739 While this is strictly dead code, we do not want to
2740 deal with it here. */
2741 if (TREE_CODE (rhs) == SSA_NAME
2742 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2743 {
2744 all = false;
2745 continue;
2746 }
2747
2748 /* Dump details. */
2749 if (dump_file && (dump_flags & TDF_DETAILS))
2750 {
2751 fprintf (dump_file, " Original statement:");
2752 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2753 }
2754
2755 /* Propagate the RHS into this use of the LHS. */
2756 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2757 propagate_value (use_p, rhs);
2758
2759 /* Special cases to avoid useless calls into the folding
2760 routines, operand scanning, etc.
2761
2762 Propagation into a PHI may cause the PHI to become
2763 a degenerate, so mark the PHI as interesting. No other
2764 actions are necessary. */
2765 if (gimple_code (use_stmt) == GIMPLE_PHI)
2766 {
2767 tree result;
2768
2769 /* Dump details. */
2770 if (dump_file && (dump_flags & TDF_DETAILS))
2771 {
2772 fprintf (dump_file, " Updated statement:");
2773 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2774 }
2775
2776 result = get_lhs_or_phi_result (use_stmt);
2777 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2778 continue;
2779 }
2780
2781 /* From this point onward we are propagating into a
2782 real statement. Folding may (or may not) be possible,
2783 we may expose new operands, expose dead EH edges,
2784 etc. */
2785 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2786 cannot fold a call that simplifies to a constant,
2787 because the GIMPLE_CALL must be replaced by a
2788 GIMPLE_ASSIGN, and there is no way to effect such a
2789 transformation in-place. We might want to consider
2790 using the more general fold_stmt here. */
2791 {
2792 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2793 fold_stmt_inplace (&gsi);
2794 }
2795
2796 /* Sometimes propagation can expose new operands to the
2797 renamer. */
2798 update_stmt (use_stmt);
2799
2800 /* Dump details. */
2801 if (dump_file && (dump_flags & TDF_DETAILS))
2802 {
2803 fprintf (dump_file, " Updated statement:");
2804 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2805 }
2806
2807 /* If we replaced a variable index with a constant, then
2808 we would need to update the invariant flag for ADDR_EXPRs. */
2809 if (gimple_assign_single_p (use_stmt)
2810 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2811 recompute_tree_invariant_for_addr_expr
2812 (gimple_assign_rhs1 (use_stmt));
2813
2814 /* If we cleaned up EH information from the statement,
2815 mark its containing block as needing EH cleanups. */
2816 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2817 {
2818 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2819 if (dump_file && (dump_flags & TDF_DETAILS))
2820 fprintf (dump_file, " Flagged to clear EH edges.\n");
2821 }
2822
2823 /* Propagation may expose new trivial copy/constant propagation
2824 opportunities. */
2825 if (gimple_assign_single_p (use_stmt)
2826 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2827 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2828 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2829 {
2830 tree result = get_lhs_or_phi_result (use_stmt);
2831 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2832 }
2833
2834 /* Propagation into these nodes may make certain edges in
2835 the CFG unexecutable. We want to identify such edges, as
2836 PHI nodes at the destination of those unexecutable edges
2837 may become degenerate. */
2838 else if (gimple_code (use_stmt) == GIMPLE_COND
2839 || gimple_code (use_stmt) == GIMPLE_SWITCH
2840 || gimple_code (use_stmt) == GIMPLE_GOTO)
2841 {
2842 tree val;
2843
2844 if (gimple_code (use_stmt) == GIMPLE_COND)
2845 val = fold_binary_loc (gimple_location (use_stmt),
2846 gimple_cond_code (use_stmt),
2847 boolean_type_node,
2848 gimple_cond_lhs (use_stmt),
2849 gimple_cond_rhs (use_stmt));
2850 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2851 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2852 else
2853 val = gimple_goto_dest (use_stmt);
2854
2855 if (val && is_gimple_min_invariant (val))
2856 {
2857 basic_block bb = gimple_bb (use_stmt);
2858 edge te = find_taken_edge (bb, val);
2859 edge_iterator ei;
2860 edge e;
2861 gimple_stmt_iterator gsi;
2862 gphi_iterator psi;
2863
2864 /* Remove all outgoing edges except TE. */
2865 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2866 {
2867 if (e != te)
2868 {
2869 /* Mark all the PHI nodes at the destination of
2870 the unexecutable edge as interesting. */
2871 for (psi = gsi_start_phis (e->dest);
2872 !gsi_end_p (psi);
2873 gsi_next (&psi))
2874 {
2875 gphi *phi = psi.phi ();
2876
2877 tree result = gimple_phi_result (phi);
2878 int version = SSA_NAME_VERSION (result);
2879
2880 bitmap_set_bit (interesting_names, version);
2881 }
2882
2883 te->probability += e->probability;
2884
2885 te->count += e->count;
2886 remove_edge (e);
2887 cfg_altered = true;
2888 }
2889 else
2890 ei_next (&ei);
2891 }
2892
2893 gsi = gsi_last_bb (gimple_bb (use_stmt));
2894 gsi_remove (&gsi, true);
2895
2896 /* And fixup the flags on the single remaining edge. */
2897 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2898 te->flags &= ~EDGE_ABNORMAL;
2899 te->flags |= EDGE_FALLTHRU;
2900 if (te->probability > REG_BR_PROB_BASE)
2901 te->probability = REG_BR_PROB_BASE;
2902 }
2903 }
2904 }
2905
2906 /* Ensure there is nothing else to do. */
2907 gcc_assert (!all || has_zero_uses (lhs));
2908
2909 /* If we were able to propagate away all uses of LHS, then
2910 we can remove STMT. */
2911 if (all)
2912 remove_stmt_or_phi (stmt);
2913 }
2914 }
2915
2916 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2917 a statement that is a trivial copy or constant initialization.
2918
2919 Attempt to eliminate STMT by propagating its RHS into all uses of
2920 its LHS. This may in turn set new bits in INTERESTING_NAMES
2921 for nodes we want to revisit later.
2922
2923 All exit paths should clear INTERESTING_NAMES for the result
2924 of STMT. */
2925
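/* E.g. (an illustrative sketch): the degenerate PHI
   "x_4 = PHI <x_1(2), x_1(3)>" has the single value x_1, which is
   propagated into all uses of x_4, after which the PHI is removed.  */
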
2926 static void
2927 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2928 {
2929 tree lhs = get_lhs_or_phi_result (stmt);
2930 tree rhs;
2931 int version = SSA_NAME_VERSION (lhs);
2932
2933 /* If the LHS of this statement or PHI has no uses, then we can
2934 just eliminate it. This can occur if, for example, the PHI
2935 was created by block duplication due to threading and its only
2936 use was in the conditional at the end of the block which was
2937 deleted. */
2938 if (has_zero_uses (lhs))
2939 {
2940 bitmap_clear_bit (interesting_names, version);
2941 remove_stmt_or_phi (stmt);
2942 return;
2943 }
2944
2945 /* Get the RHS of the assignment or PHI node if the PHI is a
2946 degenerate. */
2947 rhs = get_rhs_or_phi_arg (stmt);
2948 if (!rhs)
2949 {
2950 bitmap_clear_bit (interesting_names, version);
2951 return;
2952 }
2953
2954 if (!virtual_operand_p (lhs))
2955 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2956 else
2957 {
2958 gimple use_stmt;
2959 imm_use_iterator iter;
2960 use_operand_p use_p;
2961 /* For virtual operands we have to propagate into all uses as
2962 otherwise we will create overlapping live ranges. */
2963 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2964 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2965 SET_USE (use_p, rhs);
2966 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2967 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2968 remove_stmt_or_phi (stmt);
2969 }
2970
2971 /* Note that STMT may well have been deleted by now, so do
2972 not access it. Instead use the saved version # to clear
2973 STMT's entry in the worklist. */
2974 bitmap_clear_bit (interesting_names, version);
2975 }
2976
2977 /* The first phase in degenerate PHI elimination.
2978
2979 Eliminate the degenerate PHIs in BB, then recurse on the
2980 dominator children of BB. */
2981
2982 static void
2983 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2984 {
2985 gphi_iterator gsi;
2986 basic_block son;
2987
2988 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2989 {
2990 gphi *phi = gsi.phi ();
2991
2992 eliminate_const_or_copy (phi, interesting_names);
2993 }
2994
2995 /* Recurse into the dominator children of BB. */
2996 for (son = first_dom_son (CDI_DOMINATORS, bb);
2997 son;
2998 son = next_dom_son (CDI_DOMINATORS, son))
2999 eliminate_degenerate_phis_1 (son, interesting_names);
3000 }
3001
3002
3003 /* A very simple pass to eliminate degenerate PHI nodes from the
3004 IL. This is meant to be fast enough to be able to be run several
3005 times in the optimization pipeline.
3006
3007 Certain optimizations, particularly those which duplicate blocks
3008 or remove edges from the CFG, can create or expose PHIs which are
3009 trivial copies or constant initializations.
3010
3011 While we could pick up these optimizations in DOM or with the
3012 combination of copy-prop and CCP, those solutions are far too
3013 heavy-weight for our needs.
3014
3015 This implementation has two phases so that we can efficiently
3016 eliminate the first order degenerate PHIs and second order
3017 degenerate PHIs.
3018
3019 The first phase performs a dominator walk to identify and eliminate
3020 the vast majority of the degenerate PHIs. When a degenerate PHI
3021 is identified and eliminated any affected statements or PHIs
3022 are put on a worklist.
3023
3024 The second phase eliminates degenerate PHIs and trivial copies
3025 or constant initializations using the worklist. This is how we
3026 pick up the secondary optimization opportunities with minimal
3027 cost. */
3028
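/* A hedged example of a second order opportunity (names invented):
   eliminating "x_3 = PHI <x_1(2), x_1(3)>" replaces x_3 with x_1
   everywhere, which may turn "y_5 = PHI <x_3(4), x_1(5)>" into the
   degenerate "y_5 = PHI <x_1(4), x_1(5)>", which the worklist phase
   then picks up.  */
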
3029 namespace {
3030
3031 const pass_data pass_data_phi_only_cprop =
3032 {
3033 GIMPLE_PASS, /* type */
3034 "phicprop", /* name */
3035 OPTGROUP_NONE, /* optinfo_flags */
3036 TV_TREE_PHI_CPROP, /* tv_id */
3037 ( PROP_cfg | PROP_ssa ), /* properties_required */
3038 0, /* properties_provided */
3039 0, /* properties_destroyed */
3040 0, /* todo_flags_start */
3041 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3042 };
3043
3044 class pass_phi_only_cprop : public gimple_opt_pass
3045 {
3046 public:
3047 pass_phi_only_cprop (gcc::context *ctxt)
3048 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3049 {}
3050
3051 /* opt_pass methods: */
3052 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3053 virtual bool gate (function *) { return flag_tree_dom != 0; }
3054 virtual unsigned int execute (function *);
3055
3056 }; // class pass_phi_only_cprop
3057
3058 unsigned int
3059 pass_phi_only_cprop::execute (function *fun)
3060 {
3061 bitmap interesting_names;
3062 bitmap interesting_names1;
3063
3064 /* Bitmap of blocks which need EH information updated. We cannot
3065 update it on-the-fly as doing so invalidates the dominator tree. */
3066 need_eh_cleanup = BITMAP_ALLOC (NULL);
3067
3068 /* INTERESTING_NAMES is effectively our worklist, indexed by
3069 SSA_NAME_VERSION.
3070
3071 A set bit indicates that the statement or PHI node which
3072 defines the SSA_NAME should be (re)examined to determine if
3073 it has become a degenerate PHI or trivial const/copy propagation
3074 opportunity.
3075
3076 Experiments have shown we generally get better compilation
3077 time behavior with bitmaps rather than sbitmaps. */
3078 interesting_names = BITMAP_ALLOC (NULL);
3079 interesting_names1 = BITMAP_ALLOC (NULL);
3080
3081 calculate_dominance_info (CDI_DOMINATORS);
3082 cfg_altered = false;
3083
3084 /* First phase. Eliminate degenerate PHIs via a dominator
3085 walk of the CFG.
3086
3087 Experiments have indicated that we generally get better
3088 compile-time behavior by visiting blocks in the first
3089 phase in dominator order. Presumably this is because walking
3090 in dominator order leaves fewer PHIs for later examination
3091 by the worklist phase. */
3092 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3093 interesting_names);
3094
3095 /* Second phase. Eliminate second order degenerate PHIs as well
3096 as trivial copies or constant initializations identified by
3097 the first phase or this phase. Basically we keep iterating
3098 until our set of INTERESTING_NAMEs is empty. */
3099 while (!bitmap_empty_p (interesting_names))
3100 {
3101 unsigned int i;
3102 bitmap_iterator bi;
3103
3104 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3105 changed during the loop. Copy it to another bitmap and
3106 use that. */
3107 bitmap_copy (interesting_names1, interesting_names);
3108
3109 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3110 {
3111 tree name = ssa_name (i);
3112
3113 /* Ignore SSA_NAMEs that have been released because
3114 their defining statement was deleted (unreachable). */
3115 if (name)
3116 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3117 interesting_names);
3118 }
3119 }
3120
3121 if (cfg_altered)
3122 {
3123 free_dominance_info (CDI_DOMINATORS);
3124 /* If we changed the CFG, schedule loops for fixup by cfgcleanup. */
3125 loops_state_set (LOOPS_NEED_FIXUP);
3126 }
3127
3128 /* Propagation of const and copies may make some EH edges dead. Purge
3129 such edges from the CFG as needed. */
3130 if (!bitmap_empty_p (need_eh_cleanup))
3131 {
3132 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3133 BITMAP_FREE (need_eh_cleanup);
3134 }
3135
3136 BITMAP_FREE (interesting_names);
3137 BITMAP_FREE (interesting_names1);
3138 return 0;
3139 }
3140
3141 } // anon namespace
3142
3143 gimple_opt_pass *
3144 make_pass_phi_only_cprop (gcc::context *ctxt)
3145 {
3146 return new pass_phi_only_cprop (ctxt);
3147 }