gcc/tree-ssa-dom.c
1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2014 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "hash-table.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "stor-layout.h"
28 #include "flags.h"
29 #include "tm_p.h"
30 #include "basic-block.h"
31 #include "cfgloop.h"
32 #include "function.h"
33 #include "gimple-pretty-print.h"
34 #include "tree-ssa-alias.h"
35 #include "internal-fn.h"
36 #include "gimple-fold.h"
37 #include "tree-eh.h"
38 #include "gimple-expr.h"
39 #include "is-a.h"
40 #include "gimple.h"
41 #include "gimple-iterator.h"
42 #include "gimple-ssa.h"
43 #include "tree-cfg.h"
44 #include "tree-phinodes.h"
45 #include "ssa-iterators.h"
46 #include "stringpool.h"
47 #include "tree-ssanames.h"
48 #include "tree-into-ssa.h"
49 #include "domwalk.h"
50 #include "tree-pass.h"
51 #include "tree-ssa-propagate.h"
52 #include "tree-ssa-threadupdate.h"
53 #include "langhooks.h"
54 #include "params.h"
55 #include "tree-ssa-threadedge.h"
56 #include "tree-ssa-dom.h"
57
58 /* This file implements optimizations on the dominator tree. */
59
60 /* Representation of a "naked" right-hand-side expression, to be used
61 in recording available expressions in the expression hash table. */
62
63 enum expr_kind
64 {
65 EXPR_SINGLE,
66 EXPR_UNARY,
67 EXPR_BINARY,
68 EXPR_TERNARY,
69 EXPR_CALL,
70 EXPR_PHI
71 };
72
73 struct hashable_expr
74 {
75 tree type;
76 enum expr_kind kind;
77 union {
78 struct { tree rhs; } single;
79 struct { enum tree_code op; tree opnd; } unary;
80 struct { enum tree_code op; tree opnd0, opnd1; } binary;
81 struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
82 struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
83 struct { size_t nargs; tree *args; } phi;
84 } ops;
85 };
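/* For example, the assignment x_1 = y_2 + z_3 maps to kind EXPR_BINARY
   with op PLUS_EXPR and operands y_2 and z_3, while a call such as
   x_1 = foo (a_2) maps to EXPR_CALL with its arguments recorded in
   ops.call (see initialize_hash_element below).  */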
86
87 /* Structure for recording known values of a conditional expression
88 at the exits from its block. */
89
90 typedef struct cond_equivalence_s
91 {
92 struct hashable_expr cond;
93 tree value;
94 } cond_equivalence;
95
96
97 /* Structure for recording edge equivalences as well as any pending
98 edge redirections during the dominator optimizer.
99
100 Computing and storing the edge equivalences instead of creating
101 them on-demand can save significant amounts of time, particularly
102 for pathological cases involving switch statements.
103
104 These structures live for a single iteration of the dominator
105 optimizer in the edge's AUX field. At the end of an iteration we
106 free each of these structures and update the AUX field to point
107 to any requested redirection target (the code for updating the
108 CFG and SSA graph for edge redirection expects redirection edge
109 	 targets to be in the AUX field for each edge).  */
110
111 struct edge_info
112 {
113 /* If this edge creates a simple equivalence, the LHS and RHS of
114 the equivalence will be stored here. */
115 tree lhs;
116 tree rhs;
117
118 /* Traversing an edge may also indicate one or more particular conditions
119 are true or false. */
120 vec<cond_equivalence> cond_equivalences;
121 };
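/* For example, for the true edge out of "if (x_5 == 42)", LHS and RHS
   would record the simple equivalence x_5 = 42, and COND_EQUIVALENCES
   would record the implied conditions x_5 <= 42 and x_5 >= 42 as true
   (see record_edge_info and record_conditions below).  */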
122
123 /* Stack of available expressions in AVAIL_EXPRs. Each block pushes any
124 expressions it enters into the hash table along with a marker entry
125 (null). When we finish processing the block, we pop off entries and
126 remove the expressions from the global hash table until we hit the
127 marker. */
128 typedef struct expr_hash_elt * expr_hash_elt_t;
129
130 static vec<expr_hash_elt_t> avail_exprs_stack;
131
132 /* Structure for entries in the expression hash table. */
133
134 struct expr_hash_elt
135 {
136 /* The value (lhs) of this expression. */
137 tree lhs;
138
139 /* The expression (rhs) we want to record. */
140 struct hashable_expr expr;
141
142 /* The stmt pointer if this element corresponds to a statement. */
143 gimple stmt;
144
145 /* The hash value for RHS. */
146 hashval_t hash;
147
148 /* A unique stamp, typically the address of the hash
149 element itself, used in removing entries from the table. */
150 struct expr_hash_elt *stamp;
151 };
152
153 /* Hashtable helpers. */
154
155 static bool hashable_expr_equal_p (const struct hashable_expr *,
156 const struct hashable_expr *);
157 static void free_expr_hash_elt (void *);
158
159 struct expr_elt_hasher
160 {
161 typedef expr_hash_elt *value_type;
162 typedef expr_hash_elt *compare_type;
163 typedef int store_values_directly;
164 static inline hashval_t hash (const value_type &);
165 static inline bool equal (const value_type &, const compare_type &);
166 static inline void remove (value_type &);
167 };
168
169 inline hashval_t
170 expr_elt_hasher::hash (const value_type &p)
171 {
172 return p->hash;
173 }
174
175 inline bool
176 expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
177 {
178 gimple stmt1 = p1->stmt;
179 const struct hashable_expr *expr1 = &p1->expr;
180 const struct expr_hash_elt *stamp1 = p1->stamp;
181 gimple stmt2 = p2->stmt;
182 const struct hashable_expr *expr2 = &p2->expr;
183 const struct expr_hash_elt *stamp2 = p2->stamp;
184
185 /* This case should apply only when removing entries from the table. */
186 if (stamp1 == stamp2)
187 return true;
188
189 /* FIXME tuples:
190 	 We add stmts to a hash table and then modify them. To detect the case
191 that we modify a stmt and then search for it, we assume that the hash
192 is always modified by that change.
193 We have to fully check why this doesn't happen on trunk or rewrite
194 this in a more reliable (and easier to understand) way. */
195 if (((const struct expr_hash_elt *)p1)->hash
196 != ((const struct expr_hash_elt *)p2)->hash)
197 return false;
198
199 	 /* In case of a collision, both RHSs have to be identical and have the
200 same VUSE operands. */
201 if (hashable_expr_equal_p (expr1, expr2)
202 && types_compatible_p (expr1->type, expr2->type))
203 {
204 /* Note that STMT1 and/or STMT2 may be NULL. */
205 return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
206 == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
207 }
208
209 return false;
210 }
211
212 /* Delete an expr_hash_elt and reclaim its storage. */
213
214 inline void
215 expr_elt_hasher::remove (value_type &element)
216 {
217 free_expr_hash_elt (element);
218 }
219
220 /* Hash table with expressions made available during the renaming process.
221 When an assignment of the form X_i = EXPR is found, the statement is
222 stored in this table. If the same expression EXPR is later found on the
223 RHS of another statement, it is replaced with X_i (thus performing
224 	 global redundancy elimination). Similarly, as we pass through conditionals
225 we record the conditional itself as having either a true or false value
226 in this table. */
227 static hash_table<expr_elt_hasher> *avail_exprs;
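/* For example, after recording x_1 = a_2 + b_3 here, a later statement
   y_4 = a_2 + b_3 can be rewritten as y_4 = x_1 by looking up its RHS
   in this table.  */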
228
229 /* Stack of dest,src pairs that need to be restored during finalization.
230
231 A NULL entry is used to mark the end of pairs which need to be
232 restored during finalization of this block. */
233 static vec<tree> const_and_copies_stack;
234
235 /* Track whether or not we have changed the control flow graph. */
236 static bool cfg_altered;
237
238 /* Bitmap of blocks that have had EH statements cleaned. We should
239 remove their dead edges eventually. */
240 static bitmap need_eh_cleanup;
241
242 /* Statistics for dominator optimizations. */
243 struct opt_stats_d
244 {
245 long num_stmts;
246 long num_exprs_considered;
247 long num_re;
248 long num_const_prop;
249 long num_copy_prop;
250 };
251
252 static struct opt_stats_d opt_stats;
253
254 /* Local functions. */
255 static void optimize_stmt (basic_block, gimple_stmt_iterator);
256 static tree lookup_avail_expr (gimple, bool);
257 static hashval_t avail_expr_hash (const void *);
258 static void htab_statistics (FILE *,
259 const hash_table<expr_elt_hasher> &);
260 static void record_cond (cond_equivalence *);
261 static void record_const_or_copy (tree, tree);
262 static void record_equality (tree, tree);
263 static void record_equivalences_from_phis (basic_block);
264 static void record_equivalences_from_incoming_edge (basic_block);
265 static void eliminate_redundant_computations (gimple_stmt_iterator *);
266 static void record_equivalences_from_stmt (gimple, int);
267 static void remove_local_expressions_from_table (void);
268 static void restore_vars_to_original_value (void);
269 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
270
271
272 /* Given a statement STMT, initialize the hash table element pointed to
273 by ELEMENT. */
274
275 static void
276 initialize_hash_element (gimple stmt, tree lhs,
277 struct expr_hash_elt *element)
278 {
279 enum gimple_code code = gimple_code (stmt);
280 struct hashable_expr *expr = &element->expr;
281
282 if (code == GIMPLE_ASSIGN)
283 {
284 enum tree_code subcode = gimple_assign_rhs_code (stmt);
285
286 switch (get_gimple_rhs_class (subcode))
287 {
288 case GIMPLE_SINGLE_RHS:
289 expr->kind = EXPR_SINGLE;
290 expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
291 expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
292 break;
293 case GIMPLE_UNARY_RHS:
294 expr->kind = EXPR_UNARY;
295 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
296 expr->ops.unary.op = subcode;
297 expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
298 break;
299 case GIMPLE_BINARY_RHS:
300 expr->kind = EXPR_BINARY;
301 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
302 expr->ops.binary.op = subcode;
303 expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
304 expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
305 break;
306 case GIMPLE_TERNARY_RHS:
307 expr->kind = EXPR_TERNARY;
308 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
309 expr->ops.ternary.op = subcode;
310 expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
311 expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
312 expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
313 break;
314 default:
315 gcc_unreachable ();
316 }
317 }
318 else if (code == GIMPLE_COND)
319 {
320 expr->type = boolean_type_node;
321 expr->kind = EXPR_BINARY;
322 expr->ops.binary.op = gimple_cond_code (stmt);
323 expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
324 expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
325 }
326 else if (code == GIMPLE_CALL)
327 {
328 size_t nargs = gimple_call_num_args (stmt);
329 size_t i;
330
331 gcc_assert (gimple_call_lhs (stmt));
332
333 expr->type = TREE_TYPE (gimple_call_lhs (stmt));
334 expr->kind = EXPR_CALL;
335 expr->ops.call.fn_from = stmt;
336
337 if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
338 expr->ops.call.pure = true;
339 else
340 expr->ops.call.pure = false;
341
342 expr->ops.call.nargs = nargs;
343 expr->ops.call.args = XCNEWVEC (tree, nargs);
344 for (i = 0; i < nargs; i++)
345 expr->ops.call.args[i] = gimple_call_arg (stmt, i);
346 }
347 else if (code == GIMPLE_SWITCH)
348 {
349 expr->type = TREE_TYPE (gimple_switch_index (stmt));
350 expr->kind = EXPR_SINGLE;
351 expr->ops.single.rhs = gimple_switch_index (stmt);
352 }
353 else if (code == GIMPLE_GOTO)
354 {
355 expr->type = TREE_TYPE (gimple_goto_dest (stmt));
356 expr->kind = EXPR_SINGLE;
357 expr->ops.single.rhs = gimple_goto_dest (stmt);
358 }
359 else if (code == GIMPLE_PHI)
360 {
361 size_t nargs = gimple_phi_num_args (stmt);
362 size_t i;
363
364 expr->type = TREE_TYPE (gimple_phi_result (stmt));
365 expr->kind = EXPR_PHI;
366 expr->ops.phi.nargs = nargs;
367 expr->ops.phi.args = XCNEWVEC (tree, nargs);
368
369 for (i = 0; i < nargs; i++)
370 expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
371 }
372 else
373 gcc_unreachable ();
374
375 element->lhs = lhs;
376 element->stmt = stmt;
377 element->hash = avail_expr_hash (element);
378 element->stamp = element;
379 }
380
381 /* Given a conditional expression COND as a tree, initialize
382 a hashable_expr expression EXPR. The conditional must be a
383 comparison or logical negation. A constant or a variable is
384 not permitted. */
385
386 static void
387 initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
388 {
389 expr->type = boolean_type_node;
390
391 if (COMPARISON_CLASS_P (cond))
392 {
393 expr->kind = EXPR_BINARY;
394 expr->ops.binary.op = TREE_CODE (cond);
395 expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
396 expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
397 }
398 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
399 {
400 expr->kind = EXPR_UNARY;
401 expr->ops.unary.op = TRUTH_NOT_EXPR;
402 expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
403 }
404 else
405 gcc_unreachable ();
406 }
407
408 /* Given a hashable_expr expression EXPR and an LHS,
409 initialize the hash table element pointed to by ELEMENT. */
410
411 static void
412 initialize_hash_element_from_expr (struct hashable_expr *expr,
413 tree lhs,
414 struct expr_hash_elt *element)
415 {
416 element->expr = *expr;
417 element->lhs = lhs;
418 element->stmt = NULL;
419 element->hash = avail_expr_hash (element);
420 element->stamp = element;
421 }
422
423 /* Compare two hashable_expr structures for equivalence.
424 	 They are considered equivalent when the expressions
425 they denote must necessarily be equal. The logic is intended
426 to follow that of operand_equal_p in fold-const.c */
427
428 static bool
429 hashable_expr_equal_p (const struct hashable_expr *expr0,
430 const struct hashable_expr *expr1)
431 {
432 tree type0 = expr0->type;
433 tree type1 = expr1->type;
434
435 	 /* If exactly one type is NULL, the expressions cannot be equal. */
436 if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
437 return false;
438
439 	 /* If the two types don't have the same signedness, precision, and mode,
440 then we can't consider them equal. */
441 if (type0 != type1
442 && (TREE_CODE (type0) == ERROR_MARK
443 || TREE_CODE (type1) == ERROR_MARK
444 || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
445 || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
446 || TYPE_MODE (type0) != TYPE_MODE (type1)))
447 return false;
448
449 if (expr0->kind != expr1->kind)
450 return false;
451
452 switch (expr0->kind)
453 {
454 case EXPR_SINGLE:
455 return operand_equal_p (expr0->ops.single.rhs,
456 expr1->ops.single.rhs, 0);
457
458 case EXPR_UNARY:
459 if (expr0->ops.unary.op != expr1->ops.unary.op)
460 return false;
461
462 if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
463 || expr0->ops.unary.op == NON_LVALUE_EXPR)
464 && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
465 return false;
466
467 return operand_equal_p (expr0->ops.unary.opnd,
468 expr1->ops.unary.opnd, 0);
469
470 case EXPR_BINARY:
471 if (expr0->ops.binary.op != expr1->ops.binary.op)
472 return false;
473
474 if (operand_equal_p (expr0->ops.binary.opnd0,
475 expr1->ops.binary.opnd0, 0)
476 && operand_equal_p (expr0->ops.binary.opnd1,
477 expr1->ops.binary.opnd1, 0))
478 return true;
479
480 /* For commutative ops, allow the other order. */
481 return (commutative_tree_code (expr0->ops.binary.op)
482 && operand_equal_p (expr0->ops.binary.opnd0,
483 expr1->ops.binary.opnd1, 0)
484 && operand_equal_p (expr0->ops.binary.opnd1,
485 expr1->ops.binary.opnd0, 0));
486
487 case EXPR_TERNARY:
488 if (expr0->ops.ternary.op != expr1->ops.ternary.op
489 || !operand_equal_p (expr0->ops.ternary.opnd2,
490 expr1->ops.ternary.opnd2, 0))
491 return false;
492
493 if (operand_equal_p (expr0->ops.ternary.opnd0,
494 expr1->ops.ternary.opnd0, 0)
495 && operand_equal_p (expr0->ops.ternary.opnd1,
496 expr1->ops.ternary.opnd1, 0))
497 return true;
498
499 /* For commutative ops, allow the other order. */
500 return (commutative_ternary_tree_code (expr0->ops.ternary.op)
501 && operand_equal_p (expr0->ops.ternary.opnd0,
502 expr1->ops.ternary.opnd1, 0)
503 && operand_equal_p (expr0->ops.ternary.opnd1,
504 expr1->ops.ternary.opnd0, 0));
505
506 case EXPR_CALL:
507 {
508 size_t i;
509
510 /* If the calls are to different functions, then they
511 clearly cannot be equal. */
512 if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
513 expr1->ops.call.fn_from))
514 return false;
515
516 if (! expr0->ops.call.pure)
517 return false;
518
519 if (expr0->ops.call.nargs != expr1->ops.call.nargs)
520 return false;
521
522 for (i = 0; i < expr0->ops.call.nargs; i++)
523 if (! operand_equal_p (expr0->ops.call.args[i],
524 expr1->ops.call.args[i], 0))
525 return false;
526
527 if (stmt_could_throw_p (expr0->ops.call.fn_from))
528 {
529 int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
530 int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
531 if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
532 return false;
533 }
534
535 return true;
536 }
537
538 case EXPR_PHI:
539 {
540 size_t i;
541
542 if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
543 return false;
544
545 for (i = 0; i < expr0->ops.phi.nargs; i++)
546 if (! operand_equal_p (expr0->ops.phi.args[i],
547 expr1->ops.phi.args[i], 0))
548 return false;
549
550 return true;
551 }
552
553 default:
554 gcc_unreachable ();
555 }
556 }
557
558 /* Generate a hash value for a pair of expressions. This can be used
559 iteratively by passing a previous result as the VAL argument.
560
561 The same hash value is always returned for a given pair of expressions,
562 regardless of the order in which they are presented. This is useful in
563 hashing the operands of commutative functions. */
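/* For example, a_1 + b_2 and b_2 + a_1 hash identically: the two
   operand hashes are put into a canonical order before being mixed
   into VAL, mirroring the commutative case in hashable_expr_equal_p.  */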
564
565 static hashval_t
566 iterative_hash_exprs_commutative (const_tree t1,
567 const_tree t2, hashval_t val)
568 {
569 hashval_t one = iterative_hash_expr (t1, 0);
570 hashval_t two = iterative_hash_expr (t2, 0);
571 hashval_t t;
572
573 if (one > two)
574 t = one, one = two, two = t;
575 val = iterative_hash_hashval_t (one, val);
576 val = iterative_hash_hashval_t (two, val);
577
578 return val;
579 }
580
581 /* Compute a hash value for a hashable_expr value EXPR and a
582 previously accumulated hash value VAL. If two hashable_expr
583 values compare equal with hashable_expr_equal_p, they must
584 hash to the same value, given an identical value of VAL.
585 The logic is intended to follow iterative_hash_expr in tree.c. */
586
587 static hashval_t
588 iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
589 {
590 switch (expr->kind)
591 {
592 case EXPR_SINGLE:
593 val = iterative_hash_expr (expr->ops.single.rhs, val);
594 break;
595
596 case EXPR_UNARY:
597 val = iterative_hash_object (expr->ops.unary.op, val);
598
599 /* Make sure to include signedness in the hash computation.
600 Don't hash the type, that can lead to having nodes which
601 compare equal according to operand_equal_p, but which
602 have different hash codes. */
603 if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
604 || expr->ops.unary.op == NON_LVALUE_EXPR)
605 val += TYPE_UNSIGNED (expr->type);
606
607 val = iterative_hash_expr (expr->ops.unary.opnd, val);
608 break;
609
610 case EXPR_BINARY:
611 val = iterative_hash_object (expr->ops.binary.op, val);
612 if (commutative_tree_code (expr->ops.binary.op))
613 val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
614 expr->ops.binary.opnd1, val);
615 else
616 {
617 val = iterative_hash_expr (expr->ops.binary.opnd0, val);
618 val = iterative_hash_expr (expr->ops.binary.opnd1, val);
619 }
620 break;
621
622 case EXPR_TERNARY:
623 val = iterative_hash_object (expr->ops.ternary.op, val);
624 if (commutative_ternary_tree_code (expr->ops.ternary.op))
625 val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
626 expr->ops.ternary.opnd1, val);
627 else
628 {
629 val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
630 val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
631 }
632 val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
633 break;
634
635 case EXPR_CALL:
636 {
637 size_t i;
638 enum tree_code code = CALL_EXPR;
639 gimple fn_from;
640
641 val = iterative_hash_object (code, val);
642 fn_from = expr->ops.call.fn_from;
643 if (gimple_call_internal_p (fn_from))
644 val = iterative_hash_hashval_t
645 ((hashval_t) gimple_call_internal_fn (fn_from), val);
646 else
647 val = iterative_hash_expr (gimple_call_fn (fn_from), val);
648 for (i = 0; i < expr->ops.call.nargs; i++)
649 val = iterative_hash_expr (expr->ops.call.args[i], val);
650 }
651 break;
652
653 case EXPR_PHI:
654 {
655 size_t i;
656
657 for (i = 0; i < expr->ops.phi.nargs; i++)
658 val = iterative_hash_expr (expr->ops.phi.args[i], val);
659 }
660 break;
661
662 default:
663 gcc_unreachable ();
664 }
665
666 return val;
667 }
668
669 /* Print a diagnostic dump of an expression hash table entry. */
670
671 static void
672 print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
673 {
674 if (element->stmt)
675 fprintf (stream, "STMT ");
676 else
677 fprintf (stream, "COND ");
678
679 if (element->lhs)
680 {
681 print_generic_expr (stream, element->lhs, 0);
682 fprintf (stream, " = ");
683 }
684
685 switch (element->expr.kind)
686 {
687 case EXPR_SINGLE:
688 print_generic_expr (stream, element->expr.ops.single.rhs, 0);
689 break;
690
691 case EXPR_UNARY:
692 fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
693 print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
694 break;
695
696 case EXPR_BINARY:
697 print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
698 fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
699 print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
700 break;
701
702 case EXPR_TERNARY:
703 fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
704 print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
705 fputs (", ", stream);
706 print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
707 fputs (", ", stream);
708 print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
709 fputs (">", stream);
710 break;
711
712 case EXPR_CALL:
713 {
714 size_t i;
715 size_t nargs = element->expr.ops.call.nargs;
716 gimple fn_from;
717
718 fn_from = element->expr.ops.call.fn_from;
719 if (gimple_call_internal_p (fn_from))
720 fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
721 stream);
722 else
723 print_generic_expr (stream, gimple_call_fn (fn_from), 0);
724 fprintf (stream, " (");
725 for (i = 0; i < nargs; i++)
726 {
727 print_generic_expr (stream, element->expr.ops.call.args[i], 0);
728 if (i + 1 < nargs)
729 fprintf (stream, ", ");
730 }
731 fprintf (stream, ")");
732 }
733 break;
734
735 case EXPR_PHI:
736 {
737 size_t i;
738 size_t nargs = element->expr.ops.phi.nargs;
739
740 fprintf (stream, "PHI <");
741 for (i = 0; i < nargs; i++)
742 {
743 print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
744 if (i + 1 < nargs)
745 fprintf (stream, ", ");
746 }
747 fprintf (stream, ">");
748 }
749 break;
750 }
751 fprintf (stream, "\n");
752
753 if (element->stmt)
754 {
755 fprintf (stream, " ");
756 print_gimple_stmt (stream, element->stmt, 0, 0);
757 }
758 }
759
760 /* Delete variable sized pieces of the expr_hash_elt ELEMENT. */
761
762 static void
763 free_expr_hash_elt_contents (struct expr_hash_elt *element)
764 {
765 if (element->expr.kind == EXPR_CALL)
766 free (element->expr.ops.call.args);
767 else if (element->expr.kind == EXPR_PHI)
768 free (element->expr.ops.phi.args);
769 }
770
771 /* Delete an expr_hash_elt and reclaim its storage. */
772
773 static void
774 free_expr_hash_elt (void *elt)
775 {
776 struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
777 free_expr_hash_elt_contents (element);
778 free (element);
779 }
780
781 /* Allocate an EDGE_INFO for edge E and attach it to E.
782 Return the new EDGE_INFO structure. */
783
784 static struct edge_info *
785 allocate_edge_info (edge e)
786 {
787 struct edge_info *edge_info;
788
789 edge_info = XCNEW (struct edge_info);
790
791 e->aux = edge_info;
792 return edge_info;
793 }
794
795 /* Free all EDGE_INFO structures associated with edges in the CFG.
796 If a particular edge can be threaded, copy the redirection
797 target from the EDGE_INFO structure into the edge's AUX field
798 as required by code to update the CFG and SSA graph for
799 jump threading. */
800
801 static void
802 free_all_edge_infos (void)
803 {
804 basic_block bb;
805 edge_iterator ei;
806 edge e;
807
808 FOR_EACH_BB_FN (bb, cfun)
809 {
810 FOR_EACH_EDGE (e, ei, bb->preds)
811 {
812 struct edge_info *edge_info = (struct edge_info *) e->aux;
813
814 if (edge_info)
815 {
816 edge_info->cond_equivalences.release ();
817 free (edge_info);
818 e->aux = NULL;
819 }
820 }
821 }
822 }
823
824 class dom_opt_dom_walker : public dom_walker
825 {
826 public:
827 dom_opt_dom_walker (cdi_direction direction)
828 : dom_walker (direction), m_dummy_cond (NULL) {}
829
830 virtual void before_dom_children (basic_block);
831 virtual void after_dom_children (basic_block);
832
833 private:
834 void thread_across_edge (edge);
835
836 gimple m_dummy_cond;
837 };
838
839 /* Jump threading, redundancy elimination and const/copy propagation.
840
841 This pass may expose new symbols that need to be renamed into SSA. For
842 every new symbol exposed, its corresponding bit will be set in
843 VARS_TO_RENAME. */
844
845 namespace {
846
847 const pass_data pass_data_dominator =
848 {
849 GIMPLE_PASS, /* type */
850 "dom", /* name */
851 OPTGROUP_NONE, /* optinfo_flags */
852 true, /* has_execute */
853 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
854 ( PROP_cfg | PROP_ssa ), /* properties_required */
855 0, /* properties_provided */
856 0, /* properties_destroyed */
857 0, /* todo_flags_start */
858 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
859 };
860
861 class pass_dominator : public gimple_opt_pass
862 {
863 public:
864 pass_dominator (gcc::context *ctxt)
865 : gimple_opt_pass (pass_data_dominator, ctxt)
866 {}
867
868 /* opt_pass methods: */
869 opt_pass * clone () { return new pass_dominator (m_ctxt); }
870 virtual bool gate (function *) { return flag_tree_dom != 0; }
871 virtual unsigned int execute (function *);
872
873 }; // class pass_dominator
874
875 unsigned int
876 pass_dominator::execute (function *fun)
877 {
878 memset (&opt_stats, 0, sizeof (opt_stats));
879
880 /* Create our hash tables. */
881 avail_exprs = new hash_table<expr_elt_hasher> (1024);
882 avail_exprs_stack.create (20);
883 const_and_copies_stack.create (20);
884 need_eh_cleanup = BITMAP_ALLOC (NULL);
885
886 calculate_dominance_info (CDI_DOMINATORS);
887 cfg_altered = false;
888
889 /* We need to know loop structures in order to avoid destroying them
890 	 in jump threading. Note that we can still, e.g., thread through loop
891 	 headers to an exit edge, or through a loop header to the loop body, assuming
892 that we update the loop info.
893
894 TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
895 to several overly conservative bail-outs in jump threading, case
896 gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
897 	 missing. We should improve jump threading in the future, after which
898 LOOPS_HAVE_PREHEADERS won't be needed here. */
899 loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
900
901 /* Initialize the value-handle array. */
902 threadedge_initialize_values ();
903
904 /* We need accurate information regarding back edges in the CFG
905 for jump threading; this may include back edges that are not part of
906 a single loop. */
907 mark_dfs_back_edges ();
908
909 /* Recursively walk the dominator tree optimizing statements. */
910 dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
911
912 {
913 gimple_stmt_iterator gsi;
914 basic_block bb;
915 FOR_EACH_BB_FN (bb, fun)
916 {
917 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
918 update_stmt_if_modified (gsi_stmt (gsi));
919 }
920 }
921
922 /* If we exposed any new variables, go ahead and put them into
923 SSA form now, before we handle jump threading. This simplifies
924 interactions between rewriting of _DECL nodes into SSA form
925 and rewriting SSA_NAME nodes into SSA form after block
926 duplication and CFG manipulation. */
927 update_ssa (TODO_update_ssa);
928
929 free_all_edge_infos ();
930
931 /* Thread jumps, creating duplicate blocks as needed. */
932 cfg_altered |= thread_through_all_blocks (first_pass_instance);
933
934 if (cfg_altered)
935 free_dominance_info (CDI_DOMINATORS);
936
937 /* Removal of statements may make some EH edges dead. Purge
938 such edges from the CFG as needed. */
939 if (!bitmap_empty_p (need_eh_cleanup))
940 {
941 unsigned i;
942 bitmap_iterator bi;
943
944 /* Jump threading may have created forwarder blocks from blocks
945 needing EH cleanup; the new successor of these blocks, which
946 	 has inherited the original block's statements, needs the cleanup.
947 Don't clear bits in the bitmap, as that can break the bitmap
948 iterator. */
949 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
950 {
951 basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
952 if (bb == NULL)
953 continue;
954 while (single_succ_p (bb)
955 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
956 bb = single_succ (bb);
957 if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
958 continue;
959 if ((unsigned) bb->index != i)
960 bitmap_set_bit (need_eh_cleanup, bb->index);
961 }
962
963 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
964 bitmap_clear (need_eh_cleanup);
965 }
966
967 statistics_counter_event (fun, "Redundant expressions eliminated",
968 opt_stats.num_re);
969 statistics_counter_event (fun, "Constants propagated",
970 opt_stats.num_const_prop);
971 statistics_counter_event (fun, "Copies propagated",
972 opt_stats.num_copy_prop);
973
974 /* Debugging dumps. */
975 if (dump_file && (dump_flags & TDF_STATS))
976 dump_dominator_optimization_stats (dump_file);
977
978 loop_optimizer_finalize ();
979
980 /* Delete our main hashtable. */
981 delete avail_exprs;
982 avail_exprs = NULL;
983
984 /* Free asserted bitmaps and stacks. */
985 BITMAP_FREE (need_eh_cleanup);
986
987 avail_exprs_stack.release ();
988 const_and_copies_stack.release ();
989
990 /* Free the value-handle array. */
991 threadedge_finalize_values ();
992
993 return 0;
994 }
995
996 } // anon namespace
997
998 gimple_opt_pass *
999 make_pass_dominator (gcc::context *ctxt)
1000 {
1001 return new pass_dominator (ctxt);
1002 }
1003
1004
1005 /* Given a conditional statement CONDSTMT, convert the
1006 condition to a canonical form. */
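/* For example, a condition such as "if (5 > x_3)" is rewritten as
   "if (x_3 < 5)": the operands are swapped and the comparison code
   adjusted with swap_tree_comparison, so equivalent conditions end up
   in one canonical form.  */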
1007
1008 static void
1009 canonicalize_comparison (gimple condstmt)
1010 {
1011 tree op0;
1012 tree op1;
1013 enum tree_code code;
1014
1015 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
1016
1017 op0 = gimple_cond_lhs (condstmt);
1018 op1 = gimple_cond_rhs (condstmt);
1019
1020 code = gimple_cond_code (condstmt);
1021
1022 /* If it would be profitable to swap the operands, then do so to
1023 canonicalize the statement, enabling better optimization.
1024
1025 By placing canonicalization of such expressions here we
1026 transparently keep statements in canonical form, even
1027 when the statement is modified. */
1028 if (tree_swap_operands_p (op0, op1, false))
1029 {
1030 /* For relationals we need to swap the operands
1031 and change the code. */
1032 if (code == LT_EXPR
1033 || code == GT_EXPR
1034 || code == LE_EXPR
1035 || code == GE_EXPR)
1036 {
1037 code = swap_tree_comparison (code);
1038
1039 gimple_cond_set_code (condstmt, code);
1040 gimple_cond_set_lhs (condstmt, op1);
1041 gimple_cond_set_rhs (condstmt, op0);
1042
1043 update_stmt (condstmt);
1044 }
1045 }
1046 }
1047
1048 /* Initialize local stacks for this optimizer and record equivalences
1049 upon entry to BB. Equivalences can come from the edge traversed to
1050 reach BB or they may come from PHI nodes at the start of BB. */
1051
1052 /* Pop entries off AVAIL_EXPRS_STACK, removing each expression from the
1053    AVAIL_EXPRS hash table, until the NULL marker for this block is reached. */
1054
1055 static void
1056 remove_local_expressions_from_table (void)
1057 {
1058 /* Remove all the expressions made available in this block. */
1059 while (avail_exprs_stack.length () > 0)
1060 {
1061 expr_hash_elt_t victim = avail_exprs_stack.pop ();
1062 expr_hash_elt **slot;
1063
1064 if (victim == NULL)
1065 break;
1066
1067 /* This must precede the actual removal from the hash table,
1068 as ELEMENT and the table entry may share a call argument
1069 vector which will be freed during removal. */
1070 if (dump_file && (dump_flags & TDF_DETAILS))
1071 {
1072 fprintf (dump_file, "<<<< ");
1073 print_expr_hash_elt (dump_file, victim);
1074 }
1075
1076 slot = avail_exprs->find_slot (victim, NO_INSERT);
1077 gcc_assert (slot && *slot == victim);
1078 avail_exprs->clear_slot (slot);
1079 }
1080 }
1081
1082 /* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
1083 CONST_AND_COPIES to its original state, stopping when we hit a
1084 NULL marker. */
1085
1086 static void
1087 restore_vars_to_original_value (void)
1088 {
1089 while (const_and_copies_stack.length () > 0)
1090 {
1091 tree prev_value, dest;
1092
1093 dest = const_and_copies_stack.pop ();
1094
1095 if (dest == NULL)
1096 break;
1097
1098 if (dump_file && (dump_flags & TDF_DETAILS))
1099 {
1100 fprintf (dump_file, "<<<< COPY ");
1101 print_generic_expr (dump_file, dest, 0);
1102 fprintf (dump_file, " = ");
1103 print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
1104 fprintf (dump_file, "\n");
1105 }
1106
1107 prev_value = const_and_copies_stack.pop ();
1108 set_ssa_name_value (dest, prev_value);
1109 }
1110 }
1111
1112 /* A trivial wrapper so that we can present the generic jump
1113 threading code with a simple API for simplifying statements. */
1114 static tree
1115 simplify_stmt_for_jump_threading (gimple stmt,
1116 gimple within_stmt ATTRIBUTE_UNUSED)
1117 {
1118 return lookup_avail_expr (stmt, false);
1119 }
1120
1121 /* Record into the equivalence tables any equivalences implied by
1122 traversing edge E (which are cached in E->aux).
1123
1124 Callers are responsible for managing the unwinding markers. */
1125 static void
1126 record_temporary_equivalences (edge e)
1127 {
1128 int i;
1129 struct edge_info *edge_info = (struct edge_info *) e->aux;
1130
1131 /* If we have info associated with this edge, record it into
1132 our equivalence tables. */
1133 if (edge_info)
1134 {
1135 cond_equivalence *eq;
1136 tree lhs = edge_info->lhs;
1137 tree rhs = edge_info->rhs;
1138
1139 /* If we have a simple NAME = VALUE equivalence, record it. */
1140 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1141 record_const_or_copy (lhs, rhs);
1142
1143 /* If we have 0 = COND or 1 = COND equivalences, record them
1144 into our expression hash tables. */
1145 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1146 record_cond (eq);
1147 }
1148 }
1149
1150 /* Wrapper for common code to attempt to thread an edge. For example,
1151 it handles lazily building the dummy condition and the bookkeeping
1152 when jump threading is successful. */
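/* For example, if traversing E records the equivalence x_1 = 3 and
   E->dest ends in "if (x_1 < 10)", the condition is statically true,
   so the jump through E->dest can be threaded to its true successor.  */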
1153
1154 void
1155 dom_opt_dom_walker::thread_across_edge (edge e)
1156 {
1157 if (! m_dummy_cond)
1158 m_dummy_cond =
1159 gimple_build_cond (NE_EXPR,
1160 integer_zero_node, integer_zero_node,
1161 NULL, NULL);
1162
1163 /* Push a marker on both stacks so we can unwind the tables back to their
1164 current state. */
1165 avail_exprs_stack.safe_push (NULL);
1166 const_and_copies_stack.safe_push (NULL_TREE);
1167
1168 /* Traversing E may result in equivalences we can utilize. */
1169 record_temporary_equivalences (e);
1170
1171 /* With all the edge equivalences in the tables, go ahead and attempt
1172 to thread through E->dest. */
1173 ::thread_across_edge (m_dummy_cond, e, false,
1174 &const_and_copies_stack,
1175 simplify_stmt_for_jump_threading);
1176
1177 /* And restore the various tables to their state before
1178 we threaded this edge.
1179
1180 XXX The code in tree-ssa-threadedge.c will restore the state of
1181 	 the const_and_copies table. We just have to restore the expression
1182 table. */
1183 remove_local_expressions_from_table ();
1184 }
1185
1186 /* PHI nodes can create equivalences too.
1187
1188 Ignoring any alternatives which are the same as the result, if
1189 all the alternatives are equal, then the PHI node creates an
1190 equivalence. */
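/* For example, given x_3 = PHI <y_7, y_7, x_3>, every alternative
   other than the result itself is y_7, so x_3 is equivalent to y_7.  */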
1191
1192 static void
1193 record_equivalences_from_phis (basic_block bb)
1194 {
1195 gimple_stmt_iterator gsi;
1196
1197 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1198 {
1199 gimple phi = gsi_stmt (gsi);
1200
1201 tree lhs = gimple_phi_result (phi);
1202 tree rhs = NULL;
1203 size_t i;
1204
1205 for (i = 0; i < gimple_phi_num_args (phi); i++)
1206 {
1207 tree t = gimple_phi_arg_def (phi, i);
1208
1209 /* Ignore alternatives which are the same as our LHS. Since
1210 	 LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
1211 can simply compare pointers. */
1212 if (lhs == t)
1213 continue;
1214
1215 /* If we have not processed an alternative yet, then set
1216 RHS to this alternative. */
1217 if (rhs == NULL)
1218 rhs = t;
1219 /* If we have processed an alternative (stored in RHS), then
1220 see if it is equal to this one. If it isn't, then stop
1221 the search. */
1222 else if (! operand_equal_for_phi_arg_p (rhs, t))
1223 break;
1224 }
1225
1226 /* If we had no interesting alternatives, then all the RHS alternatives
1227 must have been the same as LHS. */
1228 if (!rhs)
1229 rhs = lhs;
1230
1231 /* If we managed to iterate through each PHI alternative without
1232 breaking out of the loop, then we have a PHI which may create
1233 a useful equivalence. We do not need to record unwind data for
1234 this, since this is a true assignment and not an equivalence
1235 inferred from a comparison. All uses of this ssa name are dominated
1236 by this assignment, so unwinding just costs time and space. */
1237 if (i == gimple_phi_num_args (phi)
1238 && may_propagate_copy (lhs, rhs))
1239 set_ssa_name_value (lhs, rhs);
1240 }
1241 }
1242
1243 /* Ignoring loop backedges, if BB has precisely one incoming edge then
1244 return that edge. Otherwise return NULL. */
1245 static edge
1246 single_incoming_edge_ignoring_loop_edges (basic_block bb)
1247 {
1248 edge retval = NULL;
1249 edge e;
1250 edge_iterator ei;
1251
1252 FOR_EACH_EDGE (e, ei, bb->preds)
1253 {
1254 /* A loop back edge can be identified by the destination of
1255 the edge dominating the source of the edge. */
1256 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
1257 continue;
1258
1259 /* If we have already seen a non-loop edge, then we must have
1260 multiple incoming non-loop edges and thus we return NULL. */
1261 if (retval)
1262 return NULL;
1263
1264 /* This is the first non-loop incoming edge we have found. Record
1265 it. */
1266 retval = e;
1267 }
1268
1269 return retval;
1270 }
1271
1272 /* Record any equivalences created by the incoming edge to BB. If BB
1273 has more than one incoming edge, then no equivalence is created. */
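/* For example, if the edge records x_3 == 7 and x_3 was set by the
   widening conversion x_3 = (int) c_2 with c_2 of type char, then
   since 7 fits in char we can also record c_2 == 7 (see the
   conversion handling below).  */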
1274
1275 static void
1276 record_equivalences_from_incoming_edge (basic_block bb)
1277 {
1278 edge e;
1279 basic_block parent;
1280 struct edge_info *edge_info;
1281
1282 /* If our parent block ended with a control statement, then we may be
1283 able to record some equivalences based on which outgoing edge from
1284 the parent was followed. */
1285 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1286
1287 e = single_incoming_edge_ignoring_loop_edges (bb);
1288
1289 /* If we had a single incoming edge from our parent block, then enter
1290 any data associated with the edge into our tables. */
1291 if (e && e->src == parent)
1292 {
1293 unsigned int i;
1294
1295 edge_info = (struct edge_info *) e->aux;
1296
1297 if (edge_info)
1298 {
1299 tree lhs = edge_info->lhs;
1300 tree rhs = edge_info->rhs;
1301 cond_equivalence *eq;
1302
1303 if (lhs)
1304 record_equality (lhs, rhs);
1305
1306 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
1307 set via a widening type conversion, then we may be able to record
1308 additional equivalences. */
1309 if (lhs
1310 && TREE_CODE (lhs) == SSA_NAME
1311 && is_gimple_constant (rhs)
1312 && TREE_CODE (rhs) == INTEGER_CST)
1313 {
1314 gimple defstmt = SSA_NAME_DEF_STMT (lhs);
1315
1316 if (defstmt
1317 && is_gimple_assign (defstmt)
1318 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
1319 {
1320 tree old_rhs = gimple_assign_rhs1 (defstmt);
1321
1322 /* If the conversion widens the original value and
1323 the constant is in the range of the type of OLD_RHS,
1324 then convert the constant and record the equivalence.
1325
1326 Note that int_fits_type_p does not check the precision
1327 if the upper and lower bounds are OK. */
1328 if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
1329 && (TYPE_PRECISION (TREE_TYPE (lhs))
1330 > TYPE_PRECISION (TREE_TYPE (old_rhs)))
1331 && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
1332 {
1333 tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
1334 record_equality (old_rhs, newval);
1335 }
1336 }
1337 }
1338
1339 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1340 record_cond (eq);
1341 }
1342 }
1343 }
1344
1345 /* Dump SSA statistics on FILE. */
1346
1347 void
1348 dump_dominator_optimization_stats (FILE *file)
1349 {
1350 fprintf (file, "Total number of statements: %6ld\n\n",
1351 opt_stats.num_stmts);
1352 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1353 opt_stats.num_exprs_considered);
1354
1355 fprintf (file, "\nHash table statistics:\n");
1356
1357 fprintf (file, " avail_exprs: ");
1358 htab_statistics (file, *avail_exprs);
1359 }
1360
1361
1362 /* Dump SSA statistics on stderr. */
1363
1364 DEBUG_FUNCTION void
1365 debug_dominator_optimization_stats (void)
1366 {
1367 dump_dominator_optimization_stats (stderr);
1368 }
1369
1370
1371 /* Dump statistics for the hash table HTAB. */
1372
1373 static void
1374 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1375 {
1376 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1377 (long) htab.size (),
1378 (long) htab.elements (),
1379 htab.collisions ());
1380 }
1381
1382
1383 /* Enter condition equivalence into the expression hash table.
1384 This indicates that a conditional expression has a known
1385 boolean value. */
1386
1387 static void
1388 record_cond (cond_equivalence *p)
1389 {
1390 struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1391 expr_hash_elt **slot;
1392
1393 initialize_hash_element_from_expr (&p->cond, p->value, element);
1394
1395 slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
1396 if (*slot == NULL)
1397 {
1398 *slot = element;
1399
1400 if (dump_file && (dump_flags & TDF_DETAILS))
1401 {
1402 fprintf (dump_file, "1>>> ");
1403 print_expr_hash_elt (dump_file, element);
1404 }
1405
1406 avail_exprs_stack.safe_push (element);
1407 }
1408 else
1409 free_expr_hash_elt (element);
1410 }
1411
1412 /* Build a cond_equivalence record indicating that the comparison
1413    CODE holds between operands OP0 and OP1 and push it onto *P. */
1414
1415 static void
1416 build_and_record_new_cond (enum tree_code code,
1417 tree op0, tree op1,
1418 vec<cond_equivalence> *p)
1419 {
1420 cond_equivalence c;
1421 struct hashable_expr *cond = &c.cond;
1422
1423 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
1424
1425 cond->type = boolean_type_node;
1426 cond->kind = EXPR_BINARY;
1427 cond->ops.binary.op = code;
1428 cond->ops.binary.opnd0 = op0;
1429 cond->ops.binary.opnd1 = op1;
1430
1431 c.value = boolean_true_node;
1432 p->safe_push (c);
1433 }
1434
1435 /* Record that COND is true and INVERTED is false into the edge information
1436 structure. Also record that any conditions dominated by COND are true
1437 as well.
1438
1439 For example, if a < b is true, then a <= b must also be true. */
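/* Concretely, for an integral comparison a < b this records a <= b and
   a != b as true (for floating-point operands, ORDERED and LTGT as
   well), in addition to recording a < b itself as true and its
   inversion as false.  */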
1440
1441 static void
1442 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
1443 {
1444 tree op0, op1;
1445 cond_equivalence c;
1446
1447 if (!COMPARISON_CLASS_P (cond))
1448 return;
1449
1450 op0 = TREE_OPERAND (cond, 0);
1451 op1 = TREE_OPERAND (cond, 1);
1452
1453 switch (TREE_CODE (cond))
1454 {
1455 case LT_EXPR:
1456 case GT_EXPR:
1457 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1458 {
1459 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1460 &edge_info->cond_equivalences);
1461 build_and_record_new_cond (LTGT_EXPR, op0, op1,
1462 &edge_info->cond_equivalences);
1463 }
1464
1465 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
1466 ? LE_EXPR : GE_EXPR),
1467 op0, op1, &edge_info->cond_equivalences);
1468 build_and_record_new_cond (NE_EXPR, op0, op1,
1469 &edge_info->cond_equivalences);
1470 break;
1471
1472 case GE_EXPR:
1473 case LE_EXPR:
1474 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1475 {
1476 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1477 &edge_info->cond_equivalences);
1478 }
1479 break;
1480
1481 case EQ_EXPR:
1482 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1483 {
1484 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1485 &edge_info->cond_equivalences);
1486 }
1487 build_and_record_new_cond (LE_EXPR, op0, op1,
1488 &edge_info->cond_equivalences);
1489 build_and_record_new_cond (GE_EXPR, op0, op1,
1490 &edge_info->cond_equivalences);
1491 break;
1492
1493 case UNORDERED_EXPR:
1494 build_and_record_new_cond (NE_EXPR, op0, op1,
1495 &edge_info->cond_equivalences);
1496 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1497 &edge_info->cond_equivalences);
1498 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1499 &edge_info->cond_equivalences);
1500 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
1501 &edge_info->cond_equivalences);
1502 build_and_record_new_cond (UNLT_EXPR, op0, op1,
1503 &edge_info->cond_equivalences);
1504 build_and_record_new_cond (UNGT_EXPR, op0, op1,
1505 &edge_info->cond_equivalences);
1506 break;
1507
1508 case UNLT_EXPR:
1509 case UNGT_EXPR:
1510 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
1511 ? UNLE_EXPR : UNGE_EXPR),
1512 op0, op1, &edge_info->cond_equivalences);
1513 build_and_record_new_cond (NE_EXPR, op0, op1,
1514 &edge_info->cond_equivalences);
1515 break;
1516
1517 case UNEQ_EXPR:
1518 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1519 &edge_info->cond_equivalences);
1520 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1521 &edge_info->cond_equivalences);
1522 break;
1523
1524 case LTGT_EXPR:
1525 build_and_record_new_cond (NE_EXPR, op0, op1,
1526 &edge_info->cond_equivalences);
1527 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1528 &edge_info->cond_equivalences);
1529 break;
1530
1531 default:
1532 break;
1533 }
1534
1535 	 /* Now record the original condition as being true and its
1536 	 inversion as being false. */
1537 initialize_expr_from_cond (cond, &c.cond);
1538 c.value = boolean_true_node;
1539 edge_info->cond_equivalences.safe_push (c);
1540
1541 /* It is possible for INVERTED to be the negation of a comparison,
1542 and not a valid RHS or GIMPLE_COND condition. This happens because
1543 invert_truthvalue may return such an expression when asked to invert
1544 a floating-point comparison. These comparisons are not assumed to
1545 obey the trichotomy law. */
1546 initialize_expr_from_cond (inverted, &c.cond);
1547 c.value = boolean_false_node;
1548 edge_info->cond_equivalences.safe_push (c);
1549 }
1550
1551 /* A helper function for record_const_or_copy and record_equality.
1552 Do the work of recording the value and undo info. */
1553
1554 static void
1555 record_const_or_copy_1 (tree x, tree y, tree prev_x)
1556 {
1557 set_ssa_name_value (x, y);
1558
1559 if (dump_file && (dump_flags & TDF_DETAILS))
1560 {
1561 fprintf (dump_file, "0>>> COPY ");
1562 print_generic_expr (dump_file, x, 0);
1563 fprintf (dump_file, " = ");
1564 print_generic_expr (dump_file, y, 0);
1565 fprintf (dump_file, "\n");
1566 }
1567
1568 const_and_copies_stack.reserve (2);
1569 const_and_copies_stack.quick_push (prev_x);
1570 const_and_copies_stack.quick_push (x);
1571 }
1572
1573 /* Record that X is equal to Y in const_and_copies. Record undo
1574 information in the block-local vector. */
1575
1576 static void
1577 record_const_or_copy (tree x, tree y)
1578 {
1579 tree prev_x = SSA_NAME_VALUE (x);
1580
1581 gcc_assert (TREE_CODE (x) == SSA_NAME);
1582
1583 if (TREE_CODE (y) == SSA_NAME)
1584 {
1585 tree tmp = SSA_NAME_VALUE (y);
1586 if (tmp)
1587 y = tmp;
1588 }
1589
1590 record_const_or_copy_1 (x, y, prev_x);
1591 }
1592
1593 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1594    This constrains the cases in which we may treat this as an assignment. */
1595
1596 static void
1597 record_equality (tree x, tree y)
1598 {
1599 tree prev_x = NULL, prev_y = NULL;
1600
1601 if (TREE_CODE (x) == SSA_NAME)
1602 prev_x = SSA_NAME_VALUE (x);
1603 if (TREE_CODE (y) == SSA_NAME)
1604 prev_y = SSA_NAME_VALUE (y);
1605
1606 /* If one of the previous values is invariant, or invariant in more loops
1607 (by depth), then use that.
1608 Otherwise it doesn't matter which value we choose, just so
1609 long as we canonicalize on one value. */
1610 if (is_gimple_min_invariant (y))
1611 ;
1612 else if (is_gimple_min_invariant (x))
1613 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1614 else if (prev_x && is_gimple_min_invariant (prev_x))
1615 x = y, y = prev_x, prev_x = prev_y;
1616 else if (prev_y)
1617 y = prev_y;
1618
1619 /* After the swapping, we must have one SSA_NAME. */
1620 if (TREE_CODE (x) != SSA_NAME)
1621 return;
1622
1623 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1624 variable compared against zero. If we're honoring signed zeros,
1625 then we cannot record this value unless we know that the value is
1626 nonzero. */
1627 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
1628 && (TREE_CODE (y) != REAL_CST
1629 || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1630 return;
1631
1632 record_const_or_copy_1 (x, y, prev_x);
1633 }
1634
1635 /* Returns true when STMT is a simple iv increment. It detects the
1636 following situation:
1637
1638 i_1 = phi (..., i_2)
1639 i_2 = i_1 +/- ... */
1640
1641 bool
1642 simple_iv_increment_p (gimple stmt)
1643 {
1644 enum tree_code code;
1645 tree lhs, preinc;
1646 gimple phi;
1647 size_t i;
1648
1649 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1650 return false;
1651
1652 lhs = gimple_assign_lhs (stmt);
1653 if (TREE_CODE (lhs) != SSA_NAME)
1654 return false;
1655
1656 code = gimple_assign_rhs_code (stmt);
1657 if (code != PLUS_EXPR
1658 && code != MINUS_EXPR
1659 && code != POINTER_PLUS_EXPR)
1660 return false;
1661
1662 preinc = gimple_assign_rhs1 (stmt);
1663 if (TREE_CODE (preinc) != SSA_NAME)
1664 return false;
1665
1666 phi = SSA_NAME_DEF_STMT (preinc);
1667 if (gimple_code (phi) != GIMPLE_PHI)
1668 return false;
1669
1670 for (i = 0; i < gimple_phi_num_args (phi); i++)
1671 if (gimple_phi_arg_def (phi, i) == lhs)
1672 return true;
1673
1674 return false;
1675 }
1676
1677 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1678 known value for that SSA_NAME (or NULL if no value is known).
1679
1680 Propagate values from CONST_AND_COPIES into the PHI nodes of the
1681 successors of BB. */
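/* For example, if x_4 currently has the known value 5 and a successor
   block contains y_1 = PHI <x_4 (E), ...>, the argument on edge E is
   replaced with 5, even if the successor is not dominated by BB.  */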
1682
1683 static void
1684 cprop_into_successor_phis (basic_block bb)
1685 {
1686 edge e;
1687 edge_iterator ei;
1688
1689 FOR_EACH_EDGE (e, ei, bb->succs)
1690 {
1691 int indx;
1692 gimple_stmt_iterator gsi;
1693
1694 /* If this is an abnormal edge, then we do not want to copy propagate
1695 into the PHI alternative associated with this edge. */
1696 if (e->flags & EDGE_ABNORMAL)
1697 continue;
1698
1699 gsi = gsi_start_phis (e->dest);
1700 if (gsi_end_p (gsi))
1701 continue;
1702
1703 /* We may have an equivalence associated with this edge. While
1704 we can not propagate it into non-dominated blocks, we can
1705 propagate them into PHIs in non-dominated blocks. */
1706
1707 /* Push the unwind marker so we can reset the const and copies
1708 table back to its original state after processing this edge. */
1709 const_and_copies_stack.safe_push (NULL_TREE);
1710
1711 /* Extract and record any simple NAME = VALUE equivalences.
1712
1713 Don't bother with [01] = COND equivalences, they're not useful
1714 here. */
1715 struct edge_info *edge_info = (struct edge_info *) e->aux;
1716 if (edge_info)
1717 {
1718 tree lhs = edge_info->lhs;
1719 tree rhs = edge_info->rhs;
1720
1721 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1722 record_const_or_copy (lhs, rhs);
1723 }
1724
1725 indx = e->dest_idx;
1726 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1727 {
1728 tree new_val;
1729 use_operand_p orig_p;
1730 tree orig_val;
1731 gimple phi = gsi_stmt (gsi);
1732
1733 /* The alternative may be associated with a constant, so verify
1734 it is an SSA_NAME before doing anything with it. */
1735 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1736 orig_val = get_use_from_ptr (orig_p);
1737 if (TREE_CODE (orig_val) != SSA_NAME)
1738 continue;
1739
1740 /* If we have *ORIG_P in our constant/copy table, then replace
1741 ORIG_P with its value in our constant/copy table. */
1742 new_val = SSA_NAME_VALUE (orig_val);
1743 if (new_val
1744 && new_val != orig_val
1745 && (TREE_CODE (new_val) == SSA_NAME
1746 || is_gimple_min_invariant (new_val))
1747 && may_propagate_copy (orig_val, new_val))
1748 propagate_value (orig_p, new_val);
1749 }
1750
1751 restore_vars_to_original_value ();
1752 }
1753 }
1754
1755 /* We have finished optimizing BB, record any information implied by
1756 taking a specific outgoing edge from BB. */
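/* For example, if BB ends in switch (i_2) and the edge E leads to the
   block for "case 3:" (and only that case), E records the equivalence
   i_2 = 3; if BB ends in "if (x_1 == y_2)", the true edge records
   x_1 = y_2 plus the conditions implied by the comparison.  */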
1757
1758 static void
1759 record_edge_info (basic_block bb)
1760 {
1761 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1762 struct edge_info *edge_info;
1763
1764 if (! gsi_end_p (gsi))
1765 {
1766 gimple stmt = gsi_stmt (gsi);
1767 location_t loc = gimple_location (stmt);
1768
1769 if (gimple_code (stmt) == GIMPLE_SWITCH)
1770 {
1771 tree index = gimple_switch_index (stmt);
1772
1773 if (TREE_CODE (index) == SSA_NAME)
1774 {
1775 int i;
1776 int n_labels = gimple_switch_num_labels (stmt);
1777 tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
1778 edge e;
1779 edge_iterator ei;
1780
1781 for (i = 0; i < n_labels; i++)
1782 {
1783 tree label = gimple_switch_label (stmt, i);
1784 basic_block target_bb = label_to_block (CASE_LABEL (label));
1785 if (CASE_HIGH (label)
1786 || !CASE_LOW (label)
1787 || info[target_bb->index])
1788 info[target_bb->index] = error_mark_node;
1789 else
1790 info[target_bb->index] = label;
1791 }
1792
1793 FOR_EACH_EDGE (e, ei, bb->succs)
1794 {
1795 basic_block target_bb = e->dest;
1796 tree label = info[target_bb->index];
1797
1798 if (label != NULL && label != error_mark_node)
1799 {
1800 tree x = fold_convert_loc (loc, TREE_TYPE (index),
1801 CASE_LOW (label));
1802 edge_info = allocate_edge_info (e);
1803 edge_info->lhs = index;
1804 edge_info->rhs = x;
1805 }
1806 }
1807 free (info);
1808 }
1809 }
1810
1811 /* A COND_EXPR may create equivalences too. */
1812 if (gimple_code (stmt) == GIMPLE_COND)
1813 {
1814 edge true_edge;
1815 edge false_edge;
1816
1817 tree op0 = gimple_cond_lhs (stmt);
1818 tree op1 = gimple_cond_rhs (stmt);
1819 enum tree_code code = gimple_cond_code (stmt);
1820
1821 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1822
1823 /* Special case comparing booleans against a constant as we
1824 	 know the value of OP0 on both arms of the branch, i.e., we
1825 can record an equivalence for OP0 rather than COND. */
1826 if ((code == EQ_EXPR || code == NE_EXPR)
1827 && TREE_CODE (op0) == SSA_NAME
1828 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1829 && is_gimple_min_invariant (op1))
1830 {
1831 if (code == EQ_EXPR)
1832 {
1833 edge_info = allocate_edge_info (true_edge);
1834 edge_info->lhs = op0;
1835 edge_info->rhs = (integer_zerop (op1)
1836 ? boolean_false_node
1837 : boolean_true_node);
1838
1839 edge_info = allocate_edge_info (false_edge);
1840 edge_info->lhs = op0;
1841 edge_info->rhs = (integer_zerop (op1)
1842 ? boolean_true_node
1843 : boolean_false_node);
1844 }
1845 else
1846 {
1847 edge_info = allocate_edge_info (true_edge);
1848 edge_info->lhs = op0;
1849 edge_info->rhs = (integer_zerop (op1)
1850 ? boolean_true_node
1851 : boolean_false_node);
1852
1853 edge_info = allocate_edge_info (false_edge);
1854 edge_info->lhs = op0;
1855 edge_info->rhs = (integer_zerop (op1)
1856 ? boolean_false_node
1857 : boolean_true_node);
1858 }
1859 }
1860 else if (is_gimple_min_invariant (op0)
1861 && (TREE_CODE (op1) == SSA_NAME
1862 || is_gimple_min_invariant (op1)))
1863 {
1864 tree cond = build2 (code, boolean_type_node, op0, op1);
1865 tree inverted = invert_truthvalue_loc (loc, cond);
1866 bool can_infer_simple_equiv
1867 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
1868 && real_zerop (op0));
1869 struct edge_info *edge_info;
1870
1871 edge_info = allocate_edge_info (true_edge);
1872 record_conditions (edge_info, cond, inverted);
1873
1874 if (can_infer_simple_equiv && code == EQ_EXPR)
1875 {
1876 edge_info->lhs = op1;
1877 edge_info->rhs = op0;
1878 }
1879
1880 edge_info = allocate_edge_info (false_edge);
1881 record_conditions (edge_info, inverted, cond);
1882
1883 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1884 {
1885 edge_info->lhs = op1;
1886 edge_info->rhs = op0;
1887 }
1888 }
1889
1890 else if (TREE_CODE (op0) == SSA_NAME
1891 && (TREE_CODE (op1) == SSA_NAME
1892 || is_gimple_min_invariant (op1)))
1893 {
1894 tree cond = build2 (code, boolean_type_node, op0, op1);
1895 tree inverted = invert_truthvalue_loc (loc, cond);
1896 bool can_infer_simple_equiv
1897 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
1898 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1899 struct edge_info *edge_info;
1900
1901 edge_info = allocate_edge_info (true_edge);
1902 record_conditions (edge_info, cond, inverted);
1903
1904 if (can_infer_simple_equiv && code == EQ_EXPR)
1905 {
1906 edge_info->lhs = op0;
1907 edge_info->rhs = op1;
1908 }
1909
1910 edge_info = allocate_edge_info (false_edge);
1911 record_conditions (edge_info, inverted, cond);
1912
1913 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1914 {
1915 edge_info->lhs = op0;
1916 edge_info->rhs = op1;
1917 }
1918 }
1919 }
1920
1921 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1922 }
1923 }
1924
1925 void
1926 dom_opt_dom_walker::before_dom_children (basic_block bb)
1927 {
1928 gimple_stmt_iterator gsi;
1929
1930 if (dump_file && (dump_flags & TDF_DETAILS))
1931 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1932
1933 /* Push a marker on the stacks of local information so that we know how
1934 far to unwind when we finalize this block. */
1935 avail_exprs_stack.safe_push (NULL);
1936 const_and_copies_stack.safe_push (NULL_TREE);
1937
1938 record_equivalences_from_incoming_edge (bb);
1939
1940 /* PHI nodes can create equivalences too. */
1941 record_equivalences_from_phis (bb);
1942
1943 /* Create equivalences from redundant PHIs. PHIs are only truly
1944 redundant when they exist in the same block, so push another
1945 marker and unwind right afterwards. */
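/* E.g. (hypothetical GIMPLE), two PHIs in the same block such as

     x_3 = PHI <a_1 (2), b_2 (3)>
     y_4 = PHI <a_1 (2), b_2 (3)>

   compute the same value, so uses of y_4 can be rewritten to use
   x_3.  */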
1946 avail_exprs_stack.safe_push (NULL);
1947 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1948 eliminate_redundant_computations (&gsi);
1949 remove_local_expressions_from_table ();
1950
1951 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1952 optimize_stmt (bb, gsi);
1953
1954 /* Now prepare to process dominated blocks. */
1955 record_edge_info (bb);
1956 cprop_into_successor_phis (bb);
1957 }
1958
1959 /* We have finished processing the dominator children of BB; perform
1960 any finalization actions in preparation for leaving this node in
1961 the dominator tree. */
1962
1963 void
1964 dom_opt_dom_walker::after_dom_children (basic_block bb)
1965 {
1966 gimple last;
1967
1968 /* If we have an outgoing edge to a block with multiple incoming and
1969 outgoing edges, then we may be able to thread the edge, i.e., we
1970 may be able to statically determine which of the outgoing edges
1971 will be traversed when the incoming edge from BB is traversed. */
1972 if (single_succ_p (bb)
1973 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1974 && potentially_threadable_block (single_succ (bb)))
1975 {
1976 thread_across_edge (single_succ_edge (bb));
1977 }
1978 else if ((last = last_stmt (bb))
1979 && gimple_code (last) == GIMPLE_COND
1980 && EDGE_COUNT (bb->succs) == 2
1981 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1982 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1983 {
1984 edge true_edge, false_edge;
1985
1986 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1987
1988 /* Only try to thread the edge if it reaches a target block with
1989 more than one predecessor and more than one successor. */
1990 if (potentially_threadable_block (true_edge->dest))
1991 thread_across_edge (true_edge);
1992
1993 /* Similarly for the ELSE arm. */
1994 if (potentially_threadable_block (false_edge->dest))
1995 thread_across_edge (false_edge);
1996
1997 }
1998
1999 /* These remove expressions local to BB from the tables. */
2000 remove_local_expressions_from_table ();
2001 restore_vars_to_original_value ();
2002 }
2003
2004 /* Search for redundant computations in STMT. If any are found, then
2005 replace them with the variable holding the result of the computation.
2006
2007 If safe, record this expression into the available expression hash
2008 table. */
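/* A minimal sketch of the effect (SSA names invented for
   illustration): if a dominating block computed

     x_1 = a_2 + b_3;

   then a later

     y_4 = a_2 + b_3;

   is rewritten to y_4 = x_1; using the LHS cached in the
   available expression table.  */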
2009
2010 static void
2011 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2012 {
2013 tree expr_type;
2014 tree cached_lhs;
2015 tree def;
2016 bool insert = true;
2017 bool assigns_var_p = false;
2018
2019 gimple stmt = gsi_stmt (*gsi);
2020
2021 if (gimple_code (stmt) == GIMPLE_PHI)
2022 def = gimple_phi_result (stmt);
2023 else
2024 def = gimple_get_lhs (stmt);
2025
2026 /* Certain expressions on the RHS can be optimized away, but cannot
2027 themselves be entered into the hash tables. */
2028 if (! def
2029 || TREE_CODE (def) != SSA_NAME
2030 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2031 || gimple_vdef (stmt)
2032 /* Do not record equivalences for increments of ivs. This would create
2033 overlapping live ranges for a very questionable gain. */
2034 || simple_iv_increment_p (stmt))
2035 insert = false;
2036
2037 /* Check if the expression has been computed before. */
2038 cached_lhs = lookup_avail_expr (stmt, insert);
2039
2040 opt_stats.num_exprs_considered++;
2041
2042 /* Get the type of the expression we are trying to optimize. */
2043 if (is_gimple_assign (stmt))
2044 {
2045 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2046 assigns_var_p = true;
2047 }
2048 else if (gimple_code (stmt) == GIMPLE_COND)
2049 expr_type = boolean_type_node;
2050 else if (is_gimple_call (stmt))
2051 {
2052 gcc_assert (gimple_call_lhs (stmt));
2053 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2054 assigns_var_p = true;
2055 }
2056 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2057 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2058 else if (gimple_code (stmt) == GIMPLE_PHI)
2059 /* We can't propagate into a phi, so the logic below doesn't apply.
2060 Instead record an equivalence between the cached LHS and the
2061 PHI result of this statement, provided they are in the same block.
2062 This should be sufficient to kill the redundant phi. */
2063 {
2064 if (def && cached_lhs)
2065 record_const_or_copy (def, cached_lhs);
2066 return;
2067 }
2068 else
2069 gcc_unreachable ();
2070
2071 if (!cached_lhs)
2072 return;
2073
2074 /* It is safe to ignore types here since we have already done
2075 type checking in the hashing and equality routines. In fact
2076 type checking here merely gets in the way of constant
2077 propagation. Also, make sure that it is safe to propagate
2078 CACHED_LHS into the expression in STMT. */
2079 if ((TREE_CODE (cached_lhs) != SSA_NAME
2080 && (assigns_var_p
2081 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2082 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2083 {
2084 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2085 || is_gimple_min_invariant (cached_lhs));
2086
2087 if (dump_file && (dump_flags & TDF_DETAILS))
2088 {
2089 fprintf (dump_file, " Replaced redundant expr '");
2090 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2091 fprintf (dump_file, "' with '");
2092 print_generic_expr (dump_file, cached_lhs, dump_flags);
2093 fprintf (dump_file, "'\n");
2094 }
2095
2096 opt_stats.num_re++;
2097
2098 if (assigns_var_p
2099 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2100 cached_lhs = fold_convert (expr_type, cached_lhs);
2101
2102 propagate_tree_value_into_stmt (gsi, cached_lhs);
2103
2104 /* Since it is always necessary to mark the result as modified,
2105 perhaps we should move this into propagate_tree_value_into_stmt
2106 itself. */
2107 gimple_set_modified (gsi_stmt (*gsi), true);
2108 }
2109 }
2110
2111 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2112 the available expressions table or the const_and_copies table.
2113 Detect and record those equivalences. */
2114 /* We handle only very simple copy equivalences here. The heavy
2115 lifting is done by eliminate_redundant_computations. */
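/* For instance (illustrative), the copy

     x_1 = y_2;

   records y_2 as the current value of x_1 in CONST_AND_COPIES,
   so dominated uses of x_1 may be replaced by y_2.  */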
2116
2117 static void
2118 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2119 {
2120 tree lhs;
2121 enum tree_code lhs_code;
2122
2123 gcc_assert (is_gimple_assign (stmt));
2124
2125 lhs = gimple_assign_lhs (stmt);
2126 lhs_code = TREE_CODE (lhs);
2127
2128 if (lhs_code == SSA_NAME
2129 && gimple_assign_single_p (stmt))
2130 {
2131 tree rhs = gimple_assign_rhs1 (stmt);
2132
2133 /* If the RHS of the assignment is a constant or another variable that
2134 may be propagated, register it in the CONST_AND_COPIES table. We
2135 do not need to record unwind data for this, since this is a true
2136 assignment and not an equivalence inferred from a comparison. All
2137 uses of this ssa name are dominated by this assignment, so unwinding
2138 just costs time and space. */
2139 if (may_optimize_p
2140 && (TREE_CODE (rhs) == SSA_NAME
2141 || is_gimple_min_invariant (rhs)))
2142 {
2143 if (dump_file && (dump_flags & TDF_DETAILS))
2144 {
2145 fprintf (dump_file, "==== ASGN ");
2146 print_generic_expr (dump_file, lhs, 0);
2147 fprintf (dump_file, " = ");
2148 print_generic_expr (dump_file, rhs, 0);
2149 fprintf (dump_file, "\n");
2150 }
2151
2152 set_ssa_name_value (lhs, rhs);
2153 }
2154 }
2155
2156 /* A memory store, even an aliased store, creates a useful
2157 equivalence. By exchanging the LHS and RHS, creating suitable
2158 vops and recording the result in the available expression table,
2159 we may be able to expose more redundant loads. */
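/* Sketch (names illustrative): after the store

     *p_1 = x_2;

   we enter the load form x_2 = *p_1 into the table, so that a
   subsequent

     y_3 = *p_1;

   can be replaced by y_3 = x_2.  */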
2160 if (!gimple_has_volatile_ops (stmt)
2161 && gimple_references_memory_p (stmt)
2162 && gimple_assign_single_p (stmt)
2163 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2164 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2165 && !is_gimple_reg (lhs))
2166 {
2167 tree rhs = gimple_assign_rhs1 (stmt);
2168 gimple new_stmt;
2169
2170 /* Build a new statement with the RHS and LHS exchanged. */
2171 if (TREE_CODE (rhs) == SSA_NAME)
2172 {
2173 /* NOTE tuples. The call to gimple_build_assign below replaced
2174 a call to build_gimple_modify_stmt, which did not set the
2175 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2176 may cause an SSA validation failure, as the LHS may be a
2177 default-initialized name and should have no definition. I'm
2178 a bit dubious of this, as the artificial statement that we
2179 generate here may in fact be ill-formed, but it is simply
2180 used as an internal device in this pass, and never becomes
2181 part of the CFG. */
2182 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2183 new_stmt = gimple_build_assign (rhs, lhs);
2184 SSA_NAME_DEF_STMT (rhs) = defstmt;
2185 }
2186 else
2187 new_stmt = gimple_build_assign (rhs, lhs);
2188
2189 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2190
2191 /* Finally enter the statement into the available expression
2192 table. */
2193 lookup_avail_expr (new_stmt, true);
2194 }
2195 }
2196
2197 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2198 CONST_AND_COPIES. */
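/* E.g. (illustrative), if CONST_AND_COPIES records x_1 -> 5, then

     y_2 = x_1 + 1;

   becomes y_2 = 5 + 1; which subsequent folding reduces to
   y_2 = 6.  */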
2199
2200 static void
2201 cprop_operand (gimple stmt, use_operand_p op_p)
2202 {
2203 tree val;
2204 tree op = USE_FROM_PTR (op_p);
2205
2206 /* If the operand has a known constant value or it is known to be a
2207 copy of some other variable, use the value or copy stored in
2208 CONST_AND_COPIES. */
2209 val = SSA_NAME_VALUE (op);
2210 if (val && val != op)
2211 {
2212 /* Do not replace hard register operands in asm statements. */
2213 if (gimple_code (stmt) == GIMPLE_ASM
2214 && !may_propagate_copy_into_asm (op))
2215 return;
2216
2217 /* Certain operands are not allowed to be copy propagated due
2218 to their interaction with exception handling and some GCC
2219 extensions. */
2220 if (!may_propagate_copy (op, val))
2221 return;
2222
2223 /* Do not propagate copies into simple IV increment statements.
2224 See PR23821 for how this can disturb IV analysis. */
2225 if (TREE_CODE (val) != INTEGER_CST
2226 && simple_iv_increment_p (stmt))
2227 return;
2228
2229 /* Dump details. */
2230 if (dump_file && (dump_flags & TDF_DETAILS))
2231 {
2232 fprintf (dump_file, " Replaced '");
2233 print_generic_expr (dump_file, op, dump_flags);
2234 fprintf (dump_file, "' with %s '",
2235 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2236 print_generic_expr (dump_file, val, dump_flags);
2237 fprintf (dump_file, "'\n");
2238 }
2239
2240 if (TREE_CODE (val) != SSA_NAME)
2241 opt_stats.num_const_prop++;
2242 else
2243 opt_stats.num_copy_prop++;
2244
2245 propagate_value (op_p, val);
2246
2247 /* And note that we modified this statement. This is now
2248 safe, even if we changed virtual operands since we will
2249 rescan the statement and rewrite its operands again. */
2250 gimple_set_modified (stmt, true);
2251 }
2252 }
2253
2254 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2255 known value for that SSA_NAME (or NULL if no value is known).
2256
2257 Propagate values from CONST_AND_COPIES into the use operands
2258 of STMT. */
2259
2260 static void
2261 cprop_into_stmt (gimple stmt)
2262 {
2263 use_operand_p op_p;
2264 ssa_op_iter iter;
2265
2266 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2267 cprop_operand (stmt, op_p);
2268 }
2269
2270 /* Optimize the statement pointed to by iterator SI.
2271
2272 We try to perform some simplistic global redundancy elimination and
2273 constant propagation:
2274
2275 1- To detect global redundancy, we keep track of expressions that have
2276 been computed in this block and its dominators. If we find that the
2277 same expression is computed more than once, we eliminate repeated
2278 computations by using the target of the first one.
2279
2280 2- Constant values and copy assignments. This is used to do very
2281 simplistic constant and copy propagation. When a constant or copy
2282 assignment is found, we map the value on the RHS of the assignment to
2283 the variable in the LHS in the CONST_AND_COPIES table. */
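/* Combined effect, as a sketch (names illustrative): with
   a_1 -> 4 recorded in CONST_AND_COPIES,

     x_2 = a_1 + 1;

   is first rewritten to x_2 = 4 + 1; and folded to x_2 = 5;
   that constant is then recorded as the value of x_2, so later
   uses of x_2 can themselves be replaced.  */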
2284
2285 static void
2286 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2287 {
2288 gimple stmt, old_stmt;
2289 bool may_optimize_p;
2290 bool modified_p = false;
2291
2292 old_stmt = stmt = gsi_stmt (si);
2293
2294 if (dump_file && (dump_flags & TDF_DETAILS))
2295 {
2296 fprintf (dump_file, "Optimizing statement ");
2297 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2298 }
2299
2300 if (gimple_code (stmt) == GIMPLE_COND)
2301 canonicalize_comparison (stmt);
2302
2303 update_stmt_if_modified (stmt);
2304 opt_stats.num_stmts++;
2305
2306 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2307 cprop_into_stmt (stmt);
2308
2309 /* If the statement has been modified with constant replacements,
2310 fold its RHS before checking for redundant computations. */
2311 if (gimple_modified_p (stmt))
2312 {
2313 tree rhs = NULL;
2314
2315 /* Try to fold the statement making sure that STMT is kept
2316 up to date. */
2317 if (fold_stmt (&si))
2318 {
2319 stmt = gsi_stmt (si);
2320 gimple_set_modified (stmt, true);
2321
2322 if (dump_file && (dump_flags & TDF_DETAILS))
2323 {
2324 fprintf (dump_file, " Folded to: ");
2325 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2326 }
2327 }
2328
2329 /* We only need to consider cases that can yield a gimple operand. */
2330 if (gimple_assign_single_p (stmt))
2331 rhs = gimple_assign_rhs1 (stmt);
2332 else if (gimple_code (stmt) == GIMPLE_GOTO)
2333 rhs = gimple_goto_dest (stmt);
2334 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2335 /* This should never be an ADDR_EXPR. */
2336 rhs = gimple_switch_index (stmt);
2337
2338 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2339 recompute_tree_invariant_for_addr_expr (rhs);
2340
2341 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2342 even if fold_stmt updated the stmt already and thus cleared
2343 gimple_modified_p flag on it. */
2344 modified_p = true;
2345 }
2346
2347 /* Check for redundant computations. Do this only for side-effect-free
2348 assignments, calls with an LHS, conditionals and switches. */
2349 may_optimize_p = (!gimple_has_side_effects (stmt)
2350 && (is_gimple_assign (stmt)
2351 || (is_gimple_call (stmt)
2352 && gimple_call_lhs (stmt) != NULL_TREE)
2353 || gimple_code (stmt) == GIMPLE_COND
2354 || gimple_code (stmt) == GIMPLE_SWITCH));
2355
2356 if (may_optimize_p)
2357 {
2358 if (gimple_code (stmt) == GIMPLE_CALL)
2359 {
2360 /* Resolve __builtin_constant_p. If it hasn't been
2361 folded to integer_one_node by now, it's fairly
2362 certain that the value simply isn't constant. */
2363 tree callee = gimple_call_fndecl (stmt);
2364 if (callee
2365 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2366 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2367 {
2368 propagate_tree_value_into_stmt (&si, integer_zero_node);
2369 stmt = gsi_stmt (si);
2370 }
2371 }
2372
2373 update_stmt_if_modified (stmt);
2374 eliminate_redundant_computations (&si);
2375 stmt = gsi_stmt (si);
2376
2377 /* Perform simple redundant store elimination. */
2378 if (gimple_assign_single_p (stmt)
2379 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2380 {
2381 tree lhs = gimple_assign_lhs (stmt);
2382 tree rhs = gimple_assign_rhs1 (stmt);
2383 tree cached_lhs;
2384 gimple new_stmt;
2385 if (TREE_CODE (rhs) == SSA_NAME)
2386 {
2387 tree tem = SSA_NAME_VALUE (rhs);
2388 if (tem)
2389 rhs = tem;
2390 }
2391 /* Build a new statement with the RHS and LHS exchanged. */
2392 if (TREE_CODE (rhs) == SSA_NAME)
2393 {
2394 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2395 new_stmt = gimple_build_assign (rhs, lhs);
2396 SSA_NAME_DEF_STMT (rhs) = defstmt;
2397 }
2398 else
2399 new_stmt = gimple_build_assign (rhs, lhs);
2400 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2401 cached_lhs = lookup_avail_expr (new_stmt, false);
2402 if (cached_lhs
2403 && rhs == cached_lhs)
2404 {
2405 basic_block bb = gimple_bb (stmt);
2406 unlink_stmt_vdef (stmt);
2407 if (gsi_remove (&si, true))
2408 {
2409 bitmap_set_bit (need_eh_cleanup, bb->index);
2410 if (dump_file && (dump_flags & TDF_DETAILS))
2411 fprintf (dump_file, " Flagged to clear EH edges.\n");
2412 }
2413 release_defs (stmt);
2414 return;
2415 }
2416 }
2417 }
2418
2419 /* Record any additional equivalences created by this statement. */
2420 if (is_gimple_assign (stmt))
2421 record_equivalences_from_stmt (stmt, may_optimize_p);
2422
2423 /* If STMT is a COND_EXPR and it was modified, then we may know
2424 where it goes. If that is the case, then mark the CFG as altered.
2425
2426 This will cause us to later call remove_unreachable_blocks and
2427 cleanup_tree_cfg when it is safe to do so. It is not safe to
2428 clean things up here since removal of edges and such can trigger
2429 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2430 the manager.
2431
2432 That's all fine and good, except that once SSA_NAMEs are released
2433 to the manager, we must not call create_ssa_name until all references
2434 to released SSA_NAMEs have been eliminated.
2435
2436 All references to the deleted SSA_NAMEs cannot be eliminated until
2437 we remove unreachable blocks.
2438
2439 We cannot remove unreachable blocks until after we have completed
2440 any queued jump threading.
2441
2442 We cannot complete any queued jump threads until we have taken
2443 appropriate variables out of SSA form. Taking variables out of
2444 SSA form can call create_ssa_name and thus we lose.
2445
2446 Ultimately I suspect we're going to need to change the interface
2447 into the SSA_NAME manager. */
2448 if (gimple_modified_p (stmt) || modified_p)
2449 {
2450 tree val = NULL;
2451
2452 update_stmt_if_modified (stmt);
2453
2454 if (gimple_code (stmt) == GIMPLE_COND)
2455 val = fold_binary_loc (gimple_location (stmt),
2456 gimple_cond_code (stmt), boolean_type_node,
2457 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2458 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2459 val = gimple_switch_index (stmt);
2460
2461 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2462 cfg_altered = true;
2463
2464 /* If we simplified a statement in such a way that it can be shown
2465 not to trap, update the EH information and the CFG to match. */
2466 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2467 {
2468 bitmap_set_bit (need_eh_cleanup, bb->index);
2469 if (dump_file && (dump_flags & TDF_DETAILS))
2470 fprintf (dump_file, " Flagged to clear EH edges.\n");
2471 }
2472 }
2473 }
2474
2475 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2476 If found, return its LHS. Otherwise insert STMT in the table and
2477 return NULL_TREE.
2478
2479 Also, when an expression is first inserted in the table, it is
2480 added to AVAIL_EXPRS_STACK, so that it can be removed when
2481 we finish processing this block and its children. */
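/* Usage note: lookup_avail_expr (stmt, true) both queries and
   inserts, while lookup_avail_expr (stmt, false) only queries,
   as done by the redundant store elimination in optimize_stmt.  */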
2482
2483 static tree
2484 lookup_avail_expr (gimple stmt, bool insert)
2485 {
2486 expr_hash_elt **slot;
2487 tree lhs;
2488 tree temp;
2489 struct expr_hash_elt element;
2490
2491 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2492 if (gimple_code (stmt) == GIMPLE_PHI)
2493 lhs = gimple_phi_result (stmt);
2494 else
2495 lhs = gimple_get_lhs (stmt);
2496
2497 initialize_hash_element (stmt, lhs, &element);
2498
2499 if (dump_file && (dump_flags & TDF_DETAILS))
2500 {
2501 fprintf (dump_file, "LKUP ");
2502 print_expr_hash_elt (dump_file, &element);
2503 }
2504
2505 /* Don't bother remembering constant assignments and copy operations.
2506 Constants and copy operations are handled by the constant/copy propagator
2507 in optimize_stmt. */
2508 if (element.expr.kind == EXPR_SINGLE
2509 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2510 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2511 return NULL_TREE;
2512
2513 /* Finally try to find the expression in the main expression hash table. */
2514 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2515 if (slot == NULL)
2516 {
2517 free_expr_hash_elt_contents (&element);
2518 return NULL_TREE;
2519 }
2520 else if (*slot == NULL)
2521 {
2522 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2523 *element2 = element;
2524 element2->stamp = element2;
2525 *slot = element2;
2526
2527 if (dump_file && (dump_flags & TDF_DETAILS))
2528 {
2529 fprintf (dump_file, "2>>> ");
2530 print_expr_hash_elt (dump_file, element2);
2531 }
2532
2533 avail_exprs_stack.safe_push (element2);
2534 return NULL_TREE;
2535 }
2536 else
2537 free_expr_hash_elt_contents (&element);
2538
2539 /* Extract the LHS of the assignment so that it can be used as the current
2540 definition of another variable. */
2541 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2542
2543 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2544 use the value from the const_and_copies table. */
2545 if (TREE_CODE (lhs) == SSA_NAME)
2546 {
2547 temp = SSA_NAME_VALUE (lhs);
2548 if (temp)
2549 lhs = temp;
2550 }
2551
2552 if (dump_file && (dump_flags & TDF_DETAILS))
2553 {
2554 fprintf (dump_file, "FIND: ");
2555 print_generic_expr (dump_file, lhs, 0);
2556 fprintf (dump_file, "\n");
2557 }
2558
2559 return lhs;
2560 }
2561
2562 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2563 for expressions using the code of the expression and the SSA numbers of
2564 its operands. */
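/* Sketch: two loads of *p_1 hash (and compare) equal only while
   the virtual use is unchanged; an intervening store creates a
   new VDEF, so a later load of *p_1 hashes differently and is
   not treated as redundant.  */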
2565
2566 static hashval_t
2567 avail_expr_hash (const void *p)
2568 {
2569 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2570 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2571 tree vuse;
2572 hashval_t val = 0;
2573
2574 val = iterative_hash_hashable_expr (expr, val);
2575
2576 /* If the hash table entry is not associated with a statement, then we
2577 can just hash the expression and not worry about virtual operands
2578 and such. */
2579 if (!stmt)
2580 return val;
2581
2582 /* Add the SSA version number of the vuse operand. This is important
2583 because compound variables like arrays are not renamed in the
2584 operands. Rather, the rename is done on the virtual variable
2585 representing all the elements of the array. */
2586 if ((vuse = gimple_vuse (stmt)))
2587 val = iterative_hash_expr (vuse, val);
2588
2589 return val;
2590 }
2591
2592 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2593 up degenerate PHIs created by or exposed by jump threading. */
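/* A degenerate PHI is one whose arguments are all the same, e.g.
   (illustrative)

     x_3 = PHI <x_1 (2), x_1 (4)>

   which is just the copy x_3 = x_1 and can be propagated away.  */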
2594
2595 /* Given a statement STMT, which is either a PHI node or an assignment,
2596 remove it from the IL. */
2597
2598 static void
2599 remove_stmt_or_phi (gimple stmt)
2600 {
2601 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2602
2603 if (gimple_code (stmt) == GIMPLE_PHI)
2604 remove_phi_node (&gsi, true);
2605 else
2606 {
2607 gsi_remove (&gsi, true);
2608 release_defs (stmt);
2609 }
2610 }
2611
2612 /* Given a statement STMT, which is either a PHI node or an assignment,
2613 return the "rhs" of the node. In the case of a non-degenerate
2614 PHI, NULL is returned. */
2615
2616 static tree
2617 get_rhs_or_phi_arg (gimple stmt)
2618 {
2619 if (gimple_code (stmt) == GIMPLE_PHI)
2620 return degenerate_phi_result (stmt);
2621 else if (gimple_assign_single_p (stmt))
2622 return gimple_assign_rhs1 (stmt);
2623 else
2624 gcc_unreachable ();
2625 }
2626
2627
2628 /* Given a statement STMT, which is either a PHI node or an assignment,
2629 return the "lhs" of the node. */
2630
2631 static tree
2632 get_lhs_or_phi_result (gimple stmt)
2633 {
2634 if (gimple_code (stmt) == GIMPLE_PHI)
2635 return gimple_phi_result (stmt);
2636 else if (is_gimple_assign (stmt))
2637 return gimple_assign_lhs (stmt);
2638 else
2639 gcc_unreachable ();
2640 }
2641
2642 /* Propagate RHS into all uses of LHS (when possible).
2643
2644 RHS and LHS are derived from STMT, which is passed in solely so
2645 that we can remove it if propagation is successful.
2646
2647 When propagating into a PHI node or into a statement which turns
2648 into a trivial copy or constant initialization, set the
2649 appropriate bit in INTERESTING_NAMEs so that we will visit those
2650 nodes as well in an effort to pick up secondary optimization
2651 opportunities. */
2652
2653 static void
2654 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2655 {
2656 /* First verify that propagation is valid. */
2657 if (may_propagate_copy (lhs, rhs))
2658 {
2659 use_operand_p use_p;
2660 imm_use_iterator iter;
2661 gimple use_stmt;
2662 bool all = true;
2663
2664 /* Dump details. */
2665 if (dump_file && (dump_flags & TDF_DETAILS))
2666 {
2667 fprintf (dump_file, " Replacing '");
2668 print_generic_expr (dump_file, lhs, dump_flags);
2669 fprintf (dump_file, "' with %s '",
2670 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2671 print_generic_expr (dump_file, rhs, dump_flags);
2672 fprintf (dump_file, "'\n");
2673 }
2674
2675 /* Walk over every use of LHS and try to replace the use with RHS.
2676 At this point, the only reason such a propagation would fail
2677 is if the use occurs in an ASM_EXPR. */
2678 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2679 {
2680 /* Leave debug stmts alone. If we succeed in propagating
2681 all non-debug uses, we'll drop the DEF, and propagation
2682 into debug stmts will occur then. */
2683 if (gimple_debug_bind_p (use_stmt))
2684 continue;
2685
2686 /* It's not always safe to propagate into an ASM_EXPR. */
2687 if (gimple_code (use_stmt) == GIMPLE_ASM
2688 && ! may_propagate_copy_into_asm (lhs))
2689 {
2690 all = false;
2691 continue;
2692 }
2693
2694 /* It's not ok to propagate into the definition stmt of RHS.
2695 <bb 9>:
2696 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2697 g_67.1_6 = prephitmp.12_36;
2698 goto <bb 9>;
2699 While this is strictly all dead code we do not want to
2700 deal with this here. */
2701 if (TREE_CODE (rhs) == SSA_NAME
2702 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2703 {
2704 all = false;
2705 continue;
2706 }
2707
2708 /* Dump details. */
2709 if (dump_file && (dump_flags & TDF_DETAILS))
2710 {
2711 fprintf (dump_file, " Original statement:");
2712 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2713 }
2714
2715 /* Propagate the RHS into this use of the LHS. */
2716 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2717 propagate_value (use_p, rhs);
2718
2719 /* Special cases to avoid useless calls into the folding
2720 routines, operand scanning, etc.
2721
2722 Propagation into a PHI may cause the PHI to become
2723 degenerate, so mark the PHI as interesting. No other
2724 actions are necessary. */
2725 if (gimple_code (use_stmt) == GIMPLE_PHI)
2726 {
2727 tree result;
2728
2729 /* Dump details. */
2730 if (dump_file && (dump_flags & TDF_DETAILS))
2731 {
2732 fprintf (dump_file, " Updated statement:");
2733 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2734 }
2735
2736 result = get_lhs_or_phi_result (use_stmt);
2737 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2738 continue;
2739 }
2740
2741 /* From this point onward we are propagating into a
2742 real statement. Folding may (or may not) be possible,
2743 we may expose new operands, expose dead EH edges,
2744 etc. */
2745 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2746 cannot fold a call that simplifies to a constant,
2747 because the GIMPLE_CALL must be replaced by a
2748 GIMPLE_ASSIGN, and there is no way to effect such a
2749 transformation in-place. We might want to consider
2750 using the more general fold_stmt here. */
2751 {
2752 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2753 fold_stmt_inplace (&gsi);
2754 }
2755
2756 /* Sometimes propagation can expose new operands to the
2757 renamer. */
2758 update_stmt (use_stmt);
2759
2760 /* Dump details. */
2761 if (dump_file && (dump_flags & TDF_DETAILS))
2762 {
2763 fprintf (dump_file, " Updated statement:");
2764 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2765 }
2766
2767 /* If we replaced a variable index with a constant, then
2768 we would need to update the invariant flag for ADDR_EXPRs. */
2769 if (gimple_assign_single_p (use_stmt)
2770 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2771 recompute_tree_invariant_for_addr_expr
2772 (gimple_assign_rhs1 (use_stmt));
2773
2774 /* If we cleaned up EH information from the statement,
2775 mark its containing block as needing EH cleanups. */
2776 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2777 {
2778 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2779 if (dump_file && (dump_flags & TDF_DETAILS))
2780 fprintf (dump_file, " Flagged to clear EH edges.\n");
2781 }
2782
2783 /* Propagation may expose new trivial copy/constant propagation
2784 opportunities. */
2785 if (gimple_assign_single_p (use_stmt)
2786 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2787 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2788 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2789 {
2790 tree result = get_lhs_or_phi_result (use_stmt);
2791 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2792 }
2793
2794 /* Propagation into these nodes may make certain edges in
2795 the CFG unexecutable. We want to identify them, as PHI nodes
2796 at the destinations of those unexecutable edges may become
2797 degenerate. */
2798 else if (gimple_code (use_stmt) == GIMPLE_COND
2799 || gimple_code (use_stmt) == GIMPLE_SWITCH
2800 || gimple_code (use_stmt) == GIMPLE_GOTO)
2801 {
2802 tree val;
2803
2804 if (gimple_code (use_stmt) == GIMPLE_COND)
2805 val = fold_binary_loc (gimple_location (use_stmt),
2806 gimple_cond_code (use_stmt),
2807 boolean_type_node,
2808 gimple_cond_lhs (use_stmt),
2809 gimple_cond_rhs (use_stmt));
2810 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2811 val = gimple_switch_index (use_stmt);
2812 else
2813 val = gimple_goto_dest (use_stmt);
2814
2815 if (val && is_gimple_min_invariant (val))
2816 {
2817 basic_block bb = gimple_bb (use_stmt);
2818 edge te = find_taken_edge (bb, val);
2819 edge_iterator ei;
2820 edge e;
2821 gimple_stmt_iterator gsi, psi;
2822
2823 /* Remove all outgoing edges except TE. */
2824 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2825 {
2826 if (e != te)
2827 {
2828 /* Mark all the PHI nodes at the destination of
2829 the unexecutable edge as interesting. */
2830 for (psi = gsi_start_phis (e->dest);
2831 !gsi_end_p (psi);
2832 gsi_next (&psi))
2833 {
2834 gimple phi = gsi_stmt (psi);
2835
2836 tree result = gimple_phi_result (phi);
2837 int version = SSA_NAME_VERSION (result);
2838
2839 bitmap_set_bit (interesting_names, version);
2840 }
2841
2842 te->probability += e->probability;
2843
2844 te->count += e->count;
2845 remove_edge (e);
2846 cfg_altered = true;
2847 }
2848 else
2849 ei_next (&ei);
2850 }
2851
2852 gsi = gsi_last_bb (gimple_bb (use_stmt));
2853 gsi_remove (&gsi, true);
2854
2855 /* And fixup the flags on the single remaining edge. */
2856 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2857 te->flags &= ~EDGE_ABNORMAL;
2858 te->flags |= EDGE_FALLTHRU;
2859 if (te->probability > REG_BR_PROB_BASE)
2860 te->probability = REG_BR_PROB_BASE;
2861 }
2862 }
2863 }
2864
2865 /* Ensure there is nothing else to do. */
2866 gcc_assert (!all || has_zero_uses (lhs));
2867
2868 /* If we were able to propagate away all uses of LHS, then
2869 we can remove STMT. */
2870 if (all)
2871 remove_stmt_or_phi (stmt);
2872 }
2873 }
2874
2875 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2876 a statement that is a trivial copy or constant initialization.
2877
2878 Attempt to eliminate STMT by propagating its RHS into all uses of
2879 its LHS. This may in turn set new bits in INTERESTING_NAMES
2880 for nodes we want to revisit later.
2881
2882 All exit paths should clear INTERESTING_NAMES for the result
2883 of STMT. */
2884
2885 static void
2886 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2887 {
2888 tree lhs = get_lhs_or_phi_result (stmt);
2889 tree rhs;
2890 int version = SSA_NAME_VERSION (lhs);
2891
2892 /* If the LHS of this statement or PHI has no uses, then we can
2893 just eliminate it. This can occur if, for example, the PHI
2894 was created by block duplication due to threading and its only
2895 use was in the conditional at the end of the block which was
2896 deleted. */
2897 if (has_zero_uses (lhs))
2898 {
2899 bitmap_clear_bit (interesting_names, version);
2900 remove_stmt_or_phi (stmt);
2901 return;
2902 }
2903
2904 /* Get the RHS of the assignment or PHI node if the PHI is a
2905 degenerate. */
2906 rhs = get_rhs_or_phi_arg (stmt);
2907 if (!rhs)
2908 {
2909 bitmap_clear_bit (interesting_names, version);
2910 return;
2911 }
2912
2913 if (!virtual_operand_p (lhs))
2914 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2915 else
2916 {
2917 gimple use_stmt;
2918 imm_use_iterator iter;
2919 use_operand_p use_p;
2920 /* For virtual operands we have to propagate into all uses, as
2921 otherwise we would create overlapping live ranges. */
2922 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2923 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2924 SET_USE (use_p, rhs);
2925 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2926 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2927 remove_stmt_or_phi (stmt);
2928 }
2929
2930 /* Note that STMT may well have been deleted by now, so do
2931 not access it; instead use the saved version number to clear
2932 STMT's entry in the worklist. */
2933 bitmap_clear_bit (interesting_names, version);
2934 }
2935
2936 /* The first phase in degenerate PHI elimination.
2937
2938 Eliminate the degenerate PHIs in BB, then recurse on the
2939 dominator children of BB. */
2940
2941 static void
2942 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2943 {
2944 gimple_stmt_iterator gsi;
2945 basic_block son;
2946
2947 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2948 {
2949 gimple phi = gsi_stmt (gsi);
2950
2951 eliminate_const_or_copy (phi, interesting_names);
2952 }
2953
2954 /* Recurse into the dominator children of BB. */
2955 for (son = first_dom_son (CDI_DOMINATORS, bb);
2956 son;
2957 son = next_dom_son (CDI_DOMINATORS, son))
2958 eliminate_degenerate_phis_1 (son, interesting_names);
2959 }
2960
2961
2962 /* A very simple pass to eliminate degenerate PHI nodes from the
2963 IL. This is meant to be fast enough to be able to be run several
2964 times in the optimization pipeline.
2965
2966 Certain optimizations, particularly those which duplicate blocks
2967 or remove edges from the CFG, can create or expose PHIs which are
2968 trivial copies or constant initializations.
2969
2970 While we could pick up these optimizations in DOM or with the
2971 combination of copy-prop and CCP, those solutions are far too
2972 heavy-weight for our needs.
2973
2974 This implementation has two phases so that we can efficiently
2975 eliminate the first order degenerate PHIs and second order
2976 degenerate PHIs.
2977
2978 The first phase performs a dominator walk to identify and eliminate
2979 the vast majority of the degenerate PHIs. When a degenerate PHI
2980 is identified and eliminated any affected statements or PHIs
2981 are put on a worklist.
2982
2983 The second phase eliminates degenerate PHIs and trivial copies
2984 or constant initializations using the worklist. This is how we
2985 pick up the secondary optimization opportunities with minimal
2986 cost. */
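/* A second order example (names illustrative): eliminating

     x_3 = PHI <x_1 (2), x_1 (4)>

   replaces x_3 with x_1 everywhere, which may turn

     y_5 = PHI <x_3 (5), x_1 (6)>

   into a degenerate PHI as well; the worklist phase picks it up.  */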
2987
2988 namespace {
2989
2990 const pass_data pass_data_phi_only_cprop =
2991 {
2992 GIMPLE_PASS, /* type */
2993 "phicprop", /* name */
2994 OPTGROUP_NONE, /* optinfo_flags */
2995 true, /* has_execute */
2996 TV_TREE_PHI_CPROP, /* tv_id */
2997 ( PROP_cfg | PROP_ssa ), /* properties_required */
2998 0, /* properties_provided */
2999 0, /* properties_destroyed */
3000 0, /* todo_flags_start */
3001 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3002 };
3003
3004 class pass_phi_only_cprop : public gimple_opt_pass
3005 {
3006 public:
3007 pass_phi_only_cprop (gcc::context *ctxt)
3008 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3009 {}
3010
3011 /* opt_pass methods: */
3012 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3013 virtual bool gate (function *) { return flag_tree_dom != 0; }
3014 virtual unsigned int execute (function *);
3015
3016 }; // class pass_phi_only_cprop
3017
3018 unsigned int
3019 pass_phi_only_cprop::execute (function *fun)
3020 {
3021 bitmap interesting_names;
3022 bitmap interesting_names1;
3023
3024 /* Bitmap of blocks which need EH information updated. We cannot
3025 update it on-the-fly as doing so invalidates the dominator tree. */
3026 need_eh_cleanup = BITMAP_ALLOC (NULL);
3027
3028 /* INTERESTING_NAMES is effectively our worklist, indexed by
3029 SSA_NAME_VERSION.
3030
3031 A set bit indicates that the statement or PHI node which
3032 defines the SSA_NAME should be (re)examined to determine if
3033 it has become a degenerate PHI or trivial const/copy propagation
3034 opportunity.
3035
3036 Experiments have shown we generally get better compile-time
3037 behavior with bitmaps rather than sbitmaps. */
3038 interesting_names = BITMAP_ALLOC (NULL);
3039 interesting_names1 = BITMAP_ALLOC (NULL);
3040
3041 calculate_dominance_info (CDI_DOMINATORS);
3042 cfg_altered = false;
3043
3044 /* First phase. Eliminate degenerate PHIs via a dominator
3045 walk of the CFG.
3046
3047 Experiments have indicated that we generally get better
3048 compile-time behavior by visiting blocks in the first
3049 phase in dominator order. Presumably this is because walking
3050 in dominator order leaves fewer PHIs for later examination
3051 by the worklist phase. */
3052 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3053 interesting_names);
3054
3055 /* Second phase. Eliminate second order degenerate PHIs as well
3056 as trivial copies or constant initializations identified by
3057 the first phase or this phase. Basically we keep iterating
3058 until our set of INTERESTING_NAMEs is empty. */
3059 while (!bitmap_empty_p (interesting_names))
3060 {
3061 unsigned int i;
3062 bitmap_iterator bi;
3063
3064 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3065 changed during the loop. Copy it to another bitmap and
3066 use that. */
3067 bitmap_copy (interesting_names1, interesting_names);
3068
3069 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3070 {
3071 tree name = ssa_name (i);
3072
3073 /* Ignore SSA_NAMEs that have been released because
3074 their defining statement was deleted (unreachable). */
3075 if (name)
3076 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3077 interesting_names);
3078 }
3079 }
3080
3081 if (cfg_altered)
3082 {
3083 free_dominance_info (CDI_DOMINATORS);
3084 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3085 loops_state_set (LOOPS_NEED_FIXUP);
3086 }
3087
3088 /* Propagation of const and copies may make some EH edges dead. Purge
3089 such edges from the CFG as needed. */
3090 if (!bitmap_empty_p (need_eh_cleanup))
3091 {
3092 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3093 BITMAP_FREE (need_eh_cleanup);
3094 }
3095
3096 BITMAP_FREE (interesting_names);
3097 BITMAP_FREE (interesting_names1);
3098 return 0;
3099 }
3100
3101 } // anon namespace
3102
3103 gimple_opt_pass *
3104 make_pass_phi_only_cprop (gcc::context *ctxt)
3105 {
3106 return new pass_phi_only_cprop (ctxt);
3107 }