/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
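
/* As an illustrative example (not from the original sources): the
   GIMPLE assignment

     a_1 = b_2 + c_3

   would be captured as a hashable_expr with kind == EXPR_BINARY,
   type == TREE_TYPE (a_1), ops.binary.op == PLUS_EXPR,
   ops.binary.opnd0 == b_2 and ops.binary.opnd1 == c_3; see
   initialize_hash_element below for the actual mapping.  */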

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;


/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};

inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table <expr_elt_hasher> avail_exprs;

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
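
/* For illustration (not from the original sources): after recording
   x_1 = 0 and then x_1 = y_2 within the same block, the stack holds,
   from bottom to top: NULL (block marker), NULL (no previous value),
   x_1, 0 (the value x_1 held before), x_1.  See record_const_or_copy_1
   and restore_vars_to_original_value, which pop the name first and
   then the previous value to restore.  */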

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void dom_thread_across_edge (struct dom_walk_data *, edge);
static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);


/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
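
/* Note, as an illustration (not part of the original sources): under
   the rules above, the binary expressions b_2 + a_1 and a_1 + b_2
   compare equal because PLUS_EXPR is commutative, while b_2 - a_1
   and a_1 - b_2 do not.  The hash function below must therefore hash
   commutative operands in an order-insensitive way so that equal
   expressions always hash identically.  */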

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", tree_code_name[element->expr.ops.ternary.op]);
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  struct dom_walk_data walk_data;

  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  /* Set up callbacks for the generic dominator tree walker.  */
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = dom_opt_enter_block;
  walk_data.after_dom_children = dom_opt_leave_block;
  /* Right now we only attach a dummy COND_EXPR to the global data pointer.
     When we attach more stuff we'll need to fill this out with a real
     structure.  */
  walk_data.global_data = NULL;
  walk_data.block_local_data_size = 0;

  /* Now initialize the dominator walker.  */
  init_walk_dominator_tree (&walk_data);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;
  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through the loop header to the loop body,
     assuming that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR)
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* And finalize the dominator walker.  */
  fini_walk_dominator_tree (&walk_data);

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();
  ssa_name_values.release ();

  return 0;
}

static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (ctxt_); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return tree_ssa_dominator_optimize (); }

}; // class pass_dominator

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
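
/* For illustration (not part of the original sources): a condition
   such as

     if (5 > a_1)

   is canonicalized by the code above into

     if (a_1 < 5)

   since tree_swap_operands_p prefers the constant as the second
   operand and swap_tree_comparison turns GT_EXPR into LT_EXPR.  */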

/* Remove the available expressions pushed in this block from
   AVAIL_EXPRS, popping entries off AVAIL_EXPRS_STACK until the
   block's NULL marker is reached.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

static void
dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
{
  if (! walk_data->global_data)
    {
      gimple dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);
      walk_data->global_data = dummy_cond;
    }

  thread_across_edge ((gimple) walk_data->global_data, e, false,
                      &const_and_copies_stack,
                      simplify_stmt_for_jump_threading);
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
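
/* E.g. (illustrative, not from the original sources): for

     x_3 = PHI <y_2 (bb 4), y_2 (bb 5), x_3 (bb 6)>

   every alternative other than the result itself is y_2, so x_3 can
   be treated as equivalent to y_2 by the function below.  */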

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
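                  /* For illustration (not from the original sources):
                     given LHS = (int) c_5 with c_5 of type char, if this
                     edge asserts LHS == 7, then 7 fits in char and the
                     code below also records c_5 == 7.  */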
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}


/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */
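
/* As a concrete illustration (not from the original sources): for a
   floating-point comparison a < b, the code below pushes
   ORDERED_EXPR (a, b), LTGT_EXPR (a, b), LE_EXPR (a, b) and
   NE_EXPR (a, b), all recorded as true, before the original condition
   and its inversion are pushed at the end.  */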

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
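
/* For illustration (not from the original sources): if record_equality
   were called with x = 7 and y = x_1, the swapping logic above would
   exchange the two so that the SSA_NAME x_1 receives the value 7,
   rather than the other way around; the name is always canonicalized
   onto the invariant.  */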

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */
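
/* E.g. (illustrative, not from the original sources): if BB ends in
   if (x_1 == 3) and its true successor contains the PHI

     y_4 = PHI <x_1 (true edge), z_2 (other edge)>

   then the x_1 argument on the true edge can be replaced by 3, even
   though the PHI's block is not dominated by that edge.  */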

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block);
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }

          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
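
/* For illustration (not from the original sources): given

     switch (i_2) { case 5: goto bb_7; ... }

   where the case label for 5 is single-valued and bb_7 is targeted by
   no other label, the switch handling above records the equivalence
   i_2 == 5 on the corresponding outgoing edge.  */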

/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

static void
dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
                     basic_block bb)
1870 {
1871 gimple_stmt_iterator gsi;
1872
1873 if (dump_file && (dump_flags & TDF_DETAILS))
1874 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1875
1876 /* Push a marker on the stacks of local information so that we know how
1877 far to unwind when we finalize this block. */
1878 avail_exprs_stack.safe_push (NULL);
1879 const_and_copies_stack.safe_push (NULL_TREE);
1880
1881 record_equivalences_from_incoming_edge (bb);
1882
1883 /* PHI nodes can create equivalences too. */
1884 record_equivalences_from_phis (bb);
1885
1886 /* Create equivalences from redundant PHIs. PHIs are only truly
1887 redundant when they exist in the same block, so push another
1888 marker and unwind right afterwards. */
1889 avail_exprs_stack.safe_push (NULL);
1890 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1891 eliminate_redundant_computations (&gsi);
1892 remove_local_expressions_from_table ();
1893
1894 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1895 optimize_stmt (bb, gsi);
1896
1897 /* Now prepare to process dominated blocks. */
1898 record_edge_info (bb);
1899 cprop_into_successor_phis (bb);
1900 }
1901
1902 /* We have finished processing the dominator children of BB; perform
1903 any finalization actions in preparation for leaving this node in
1904 the dominator tree. */
1905
1906 static void
1907 dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
1908 {
1909 gimple last;
1910
1911 /* If we have an outgoing edge to a block with multiple incoming and
1912 outgoing edges, then we may be able to thread the edge, i.e., we
1913 may be able to statically determine which of the outgoing edges
1914 will be traversed when the incoming edge from BB is traversed. */
1915 if (single_succ_p (bb)
1916 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1917 && potentially_threadable_block (single_succ (bb)))
1918 {
1919 /* Push a marker on the stack, which thread_across_edge expects
1920 and will remove. */
1921 const_and_copies_stack.safe_push (NULL_TREE);
1922 dom_thread_across_edge (walk_data, single_succ_edge (bb));
1923 }
1924 else if ((last = last_stmt (bb))
1925 && gimple_code (last) == GIMPLE_COND
1926 && EDGE_COUNT (bb->succs) == 2
1927 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1928 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1929 {
1930 edge true_edge, false_edge;
1931
1932 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1933
1934 /* Only try to thread the edge if it reaches a target block with
1935 more than one predecessor and more than one successor. */
1936 if (potentially_threadable_block (true_edge->dest))
1937 {
1938 struct edge_info *edge_info;
1939 unsigned int i;
1940
1941 /* Push a marker onto the available expression stack so that we
1942 unwind any expressions related to the TRUE arm before processing
1943 the false arm below. */
1944 avail_exprs_stack.safe_push (NULL);
1945 const_and_copies_stack.safe_push (NULL_TREE);
1946
1947 edge_info = (struct edge_info *) true_edge->aux;
1948
1949 /* If we have info associated with this edge, record it into
1950 our equivalence tables. */
1951 if (edge_info)
1952 {
1953 cond_equivalence *eq;
1954 tree lhs = edge_info->lhs;
1955 tree rhs = edge_info->rhs;
1956
1957 /* If we have a simple NAME = VALUE equivalence, record it. */
1958 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1959 record_const_or_copy (lhs, rhs);
1960
1961 /* If we have 0 = COND or 1 = COND equivalences, record them
1962 into our expression hash tables. */
1963 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1964 record_cond (eq);
1965 }
1966
1967 dom_thread_across_edge (walk_data, true_edge);
1968
1969 /* And restore the various tables to their state before
1970 we threaded this edge. */
1971 remove_local_expressions_from_table ();
1972 }
1973
1974 /* Similarly for the ELSE arm. */
1975 if (potentially_threadable_block (false_edge->dest))
1976 {
1977 struct edge_info *edge_info;
1978 unsigned int i;
1979
1980 const_and_copies_stack.safe_push (NULL_TREE);
1981 edge_info = (struct edge_info *) false_edge->aux;
1982
1983 /* If we have info associated with this edge, record it into
1984 our equivalence tables. */
1985 if (edge_info)
1986 {
1987 cond_equivalence *eq;
1988 tree lhs = edge_info->lhs;
1989 tree rhs = edge_info->rhs;
1990
1991 /* If we have a simple NAME = VALUE equivalence, record it. */
1992 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1993 record_const_or_copy (lhs, rhs);
1994
1995 /* If we have 0 = COND or 1 = COND equivalences, record them
1996 into our expression hash tables. */
1997 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1998 record_cond (eq);
1999 }
2000
2001 /* Now thread the edge. */
2002 dom_thread_across_edge (walk_data, false_edge);
2003
2004 /* No need to remove local expressions from our tables
2005 or restore vars to their original value as that will
2006 be done immediately below. */
2007 }
2008 }
2009
2010 remove_local_expressions_from_table ();
2011 restore_vars_to_original_value ();
2012 }
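/* A sketch of the threading opportunity handled above (illustrative,
   with hypothetical block numbers): given

     <bb 2>: if (x_1 == 0) goto <bb 4>; else goto <bb 5>;
     <bb 4>: if (x_1 == 0) goto <bb 6>; else goto <bb 7>;

   recording x_1 = 0 from the edge 2->4 lets dom_thread_across_edge
   statically resolve bb 4's condition and queue a jump thread that
   sends the edge 2->4 directly to bb 6.  */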
2013
2014 /* Search for redundant computations in STMT. If any are found, then
2015 replace them with the variable holding the result of the computation.
2016
2017 If safe, record this expression into the available expression hash
2018 table. */
2019
2020 static void
2021 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2022 {
2023 tree expr_type;
2024 tree cached_lhs;
2025 tree def;
2026 bool insert = true;
2027 bool assigns_var_p = false;
2028
2029 gimple stmt = gsi_stmt (*gsi);
2030
2031 if (gimple_code (stmt) == GIMPLE_PHI)
2032 def = gimple_phi_result (stmt);
2033 else
2034 def = gimple_get_lhs (stmt);
2035
2036 /* Certain expressions on the RHS can be optimized away, but can not
2037 themselves be entered into the hash tables. */
2038 if (! def
2039 || TREE_CODE (def) != SSA_NAME
2040 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2041 || gimple_vdef (stmt)
2042 /* Do not record equivalences for increments of ivs. This would create
2043 overlapping live ranges for a very questionable gain. */
2044 || simple_iv_increment_p (stmt))
2045 insert = false;
2046
2047 /* Check if the expression has been computed before. */
2048 cached_lhs = lookup_avail_expr (stmt, insert);
2049
2050 opt_stats.num_exprs_considered++;
2051
2052 /* Get the type of the expression we are trying to optimize. */
2053 if (is_gimple_assign (stmt))
2054 {
2055 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2056 assigns_var_p = true;
2057 }
2058 else if (gimple_code (stmt) == GIMPLE_COND)
2059 expr_type = boolean_type_node;
2060 else if (is_gimple_call (stmt))
2061 {
2062 gcc_assert (gimple_call_lhs (stmt));
2063 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2064 assigns_var_p = true;
2065 }
2066 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2067 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2068 else if (gimple_code (stmt) == GIMPLE_PHI)
2069 /* We can't propagate into a phi, so the logic below doesn't apply.
2070 Instead record an equivalence between the cached LHS and the
2071 PHI result of this statement, provided they are in the same block.
2072 This should be sufficient to kill the redundant phi. */
2073 {
2074 if (def && cached_lhs)
2075 record_const_or_copy (def, cached_lhs);
2076 return;
2077 }
2078 else
2079 gcc_unreachable ();
2080
2081 if (!cached_lhs)
2082 return;
2083
2084 /* It is safe to ignore types here since we have already done
2085 type checking in the hashing and equality routines. In fact
2086 type checking here merely gets in the way of constant
2087 propagation. Also, make sure that it is safe to propagate
2088 CACHED_LHS into the expression in STMT. */
2089 if ((TREE_CODE (cached_lhs) != SSA_NAME
2090 && (assigns_var_p
2091 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2092 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2093 {
2094 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2095 || is_gimple_min_invariant (cached_lhs));
2096
2097 if (dump_file && (dump_flags & TDF_DETAILS))
2098 {
2099 fprintf (dump_file, " Replaced redundant expr '");
2100 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2101 fprintf (dump_file, "' with '");
2102 print_generic_expr (dump_file, cached_lhs, dump_flags);
2103 fprintf (dump_file, "'\n");
2104 }
2105
2106 opt_stats.num_re++;
2107
2108 if (assigns_var_p
2109 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2110 cached_lhs = fold_convert (expr_type, cached_lhs);
2111
2112 propagate_tree_value_into_stmt (gsi, cached_lhs);
2113
2114 /* Since it is always necessary to mark the result as modified,
2115 perhaps we should move this into propagate_tree_value_into_stmt
2116 itself. */
2117 gimple_set_modified (gsi_stmt (*gsi), true);
2118 }
2119 }
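/* For example (illustrative): in C source such as

     int f (int a, int b)
     {
       int x = a + b;
       int y = a + b;
       return x * y;
     }

   the second a + b hashes to the same hashable_expr as the first, so
   lookup_avail_expr returns x and the statement becomes y = x, which
   const/copy propagation then cleans up.  */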
2120
2121 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2122 the available expressions table or the const_and_copies table.
2123 Detect and record those equivalences. */
2124 /* We handle only very simple copy equivalences here. The heavy
2125 lifting is done by eliminate_redundant_computations. */
2126
2127 static void
2128 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2129 {
2130 tree lhs;
2131 enum tree_code lhs_code;
2132
2133 gcc_assert (is_gimple_assign (stmt));
2134
2135 lhs = gimple_assign_lhs (stmt);
2136 lhs_code = TREE_CODE (lhs);
2137
2138 if (lhs_code == SSA_NAME
2139 && gimple_assign_single_p (stmt))
2140 {
2141 tree rhs = gimple_assign_rhs1 (stmt);
2142
2143 /* If the RHS of the assignment is a constant or another variable that
2144 may be propagated, register it in the CONST_AND_COPIES table. We
2145 do not need to record unwind data for this, since this is a true
2146 assignment and not an equivalence inferred from a comparison. All
2147 uses of this ssa name are dominated by this assignment, so unwinding
2148 just costs time and space. */
2149 if (may_optimize_p
2150 && (TREE_CODE (rhs) == SSA_NAME
2151 || is_gimple_min_invariant (rhs)))
2152 {
2153 if (dump_file && (dump_flags & TDF_DETAILS))
2154 {
2155 fprintf (dump_file, "==== ASGN ");
2156 print_generic_expr (dump_file, lhs, 0);
2157 fprintf (dump_file, " = ");
2158 print_generic_expr (dump_file, rhs, 0);
2159 fprintf (dump_file, "\n");
2160 }
2161
2162 set_ssa_name_value (lhs, rhs);
2163 }
2164 }
2165
2166 /* A memory store, even an aliased store, creates a useful
2167 equivalence. By exchanging the LHS and RHS, creating suitable
2168 vops and recording the result in the available expression table,
2169 we may be able to expose more redundant loads. */
2170 if (!gimple_has_volatile_ops (stmt)
2171 && gimple_references_memory_p (stmt)
2172 && gimple_assign_single_p (stmt)
2173 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2174 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2175 && !is_gimple_reg (lhs))
2176 {
2177 tree rhs = gimple_assign_rhs1 (stmt);
2178 gimple new_stmt;
2179
2180 /* Build a new statement with the RHS and LHS exchanged. */
2181 if (TREE_CODE (rhs) == SSA_NAME)
2182 {
2183 /* NOTE tuples. The call to gimple_build_assign below replaced
2184 a call to build_gimple_modify_stmt, which did not set the
2185 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2186 may cause an SSA validation failure, as the LHS may be a
2187 default-initialized name and should have no definition. I'm
2188 a bit dubious of this, as the artificial statement that we
2189 generate here may in fact be ill-formed, but it is simply
2190 used as an internal device in this pass, and never becomes
2191 part of the CFG. */
2192 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2193 new_stmt = gimple_build_assign (rhs, lhs);
2194 SSA_NAME_DEF_STMT (rhs) = defstmt;
2195 }
2196 else
2197 new_stmt = gimple_build_assign (rhs, lhs);
2198
2199 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2200
2201 /* Finally enter the statement into the available expression
2202 table. */
2203 lookup_avail_expr (new_stmt, true);
2204 }
2205 }
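/* Sketch of the store equivalence above (illustrative): after a store

     *p_1 = x_2;

   the artificial statement "x_2 = *p_1" (with its VUSE set to the
   store's VDEF) is entered into the available expression table, so a
   subsequent load

     y_3 = *p_1;

   that sees the same virtual operand is recognized as redundant and
   replaced by x_2.  */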
2206
2207 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2208 CONST_AND_COPIES. */
2209
2210 static void
2211 cprop_operand (gimple stmt, use_operand_p op_p)
2212 {
2213 tree val;
2214 tree op = USE_FROM_PTR (op_p);
2215
2216 /* If the operand has a known constant value or it is known to be a
2217 copy of some other variable, use the value or copy stored in
2218 CONST_AND_COPIES. */
2219 val = SSA_NAME_VALUE (op);
2220 if (val && val != op)
2221 {
2222 /* Do not replace hard register operands in asm statements. */
2223 if (gimple_code (stmt) == GIMPLE_ASM
2224 && !may_propagate_copy_into_asm (op))
2225 return;
2226
2227 /* Certain operands are not allowed to be copy propagated due
2228 to their interaction with exception handling and some GCC
2229 extensions. */
2230 if (!may_propagate_copy (op, val))
2231 return;
2232
2233 /* Do not propagate addresses that point to volatiles into memory
2234 stmts without volatile operands. */
2235 if (POINTER_TYPE_P (TREE_TYPE (val))
2236 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2237 && gimple_has_mem_ops (stmt)
2238 && !gimple_has_volatile_ops (stmt))
2239 return;
2240
2241 /* Do not propagate copies if the propagated value is at a deeper loop
2242 depth than the propagatee. Otherwise, this may move loop variant
2243 variables outside of their loops and prevent coalescing
2244 opportunities. If the value was loop invariant, it will be hoisted
2245 by LICM and exposed for copy propagation. */
2246 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2247 return;
2248
2249 /* Do not propagate copies into simple IV increment statements.
2250 See PR23821 for how this can disturb IV analysis. */
2251 if (TREE_CODE (val) != INTEGER_CST
2252 && simple_iv_increment_p (stmt))
2253 return;
2254
2255 /* Dump details. */
2256 if (dump_file && (dump_flags & TDF_DETAILS))
2257 {
2258 fprintf (dump_file, " Replaced '");
2259 print_generic_expr (dump_file, op, dump_flags);
2260 fprintf (dump_file, "' with %s '",
2261 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2262 print_generic_expr (dump_file, val, dump_flags);
2263 fprintf (dump_file, "'\n");
2264 }
2265
2266 if (TREE_CODE (val) != SSA_NAME)
2267 opt_stats.num_const_prop++;
2268 else
2269 opt_stats.num_copy_prop++;
2270
2271 propagate_value (op_p, val);
2272
2273 /* And note that we modified this statement. This is now
2274 safe, even if we changed virtual operands since we will
2275 rescan the statement and rewrite its operands again. */
2276 gimple_set_modified (stmt, true);
2277 }
2278 }
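/* For example (illustrative): if CONST_AND_COPIES records that x_1
   has the value 5, a use such as

     y_2 = x_1 + 3;

   is rewritten here to y_2 = 5 + 3, which optimize_stmt subsequently
   folds to y_2 = 8.  */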
2279
2280 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2281 known value for that SSA_NAME (or NULL if no value is known).
2282
2283 Propagate values from CONST_AND_COPIES into the uses, vuses and
2284 vdef_ops of STMT. */
2285
2286 static void
2287 cprop_into_stmt (gimple stmt)
2288 {
2289 use_operand_p op_p;
2290 ssa_op_iter iter;
2291
2292 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2293 cprop_operand (stmt, op_p);
2294 }
2295
2296 /* Optimize the statement pointed to by iterator SI.
2297
2298 We try to perform some simplistic global redundancy elimination and
2299 constant propagation:
2300
2301 1- To detect global redundancy, we keep track of expressions that have
2302 been computed in this block and its dominators. If we find that the
2303 same expression is computed more than once, we eliminate repeated
2304 computations by using the target of the first one.
2305
2306 2- Constant values and copy assignments. This is used to do very
2307 simplistic constant and copy propagation. When a constant or copy
2308 assignment is found, we map the value on the RHS of the assignment to
2309 the variable in the LHS in the CONST_AND_COPIES table. */
2310
2311 static void
2312 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2313 {
2314 gimple stmt, old_stmt;
2315 bool may_optimize_p;
2316 bool modified_p = false;
2317
2318 old_stmt = stmt = gsi_stmt (si);
2319
2320 if (dump_file && (dump_flags & TDF_DETAILS))
2321 {
2322 fprintf (dump_file, "Optimizing statement ");
2323 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2324 }
2325
2326 if (gimple_code (stmt) == GIMPLE_COND)
2327 canonicalize_comparison (stmt);
2328
2329 update_stmt_if_modified (stmt);
2330 opt_stats.num_stmts++;
2331
2332 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2333 cprop_into_stmt (stmt);
2334
2335 /* If the statement has been modified with constant replacements,
2336 fold its RHS before checking for redundant computations. */
2337 if (gimple_modified_p (stmt))
2338 {
2339 tree rhs = NULL;
2340
2341 /* Try to fold the statement making sure that STMT is kept
2342 up to date. */
2343 if (fold_stmt (&si))
2344 {
2345 stmt = gsi_stmt (si);
2346 gimple_set_modified (stmt, true);
2347
2348 if (dump_file && (dump_flags & TDF_DETAILS))
2349 {
2350 fprintf (dump_file, " Folded to: ");
2351 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2352 }
2353 }
2354
2355 /* We only need to consider cases that can yield a gimple operand. */
2356 if (gimple_assign_single_p (stmt))
2357 rhs = gimple_assign_rhs1 (stmt);
2358 else if (gimple_code (stmt) == GIMPLE_GOTO)
2359 rhs = gimple_goto_dest (stmt);
2360 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2361 /* This should never be an ADDR_EXPR. */
2362 rhs = gimple_switch_index (stmt);
2363
2364 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2365 recompute_tree_invariant_for_addr_expr (rhs);
2366
2367 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2368 even if fold_stmt updated the stmt already and thus cleared
2369 gimple_modified_p flag on it. */
2370 modified_p = true;
2371 }
2372
2373 /* Check for redundant computations. Do this optimization only for
2374 statements with no side effects: assignments, calls with an LHS, conditionals and switches. */
2375 may_optimize_p = (!gimple_has_side_effects (stmt)
2376 && (is_gimple_assign (stmt)
2377 || (is_gimple_call (stmt)
2378 && gimple_call_lhs (stmt) != NULL_TREE)
2379 || gimple_code (stmt) == GIMPLE_COND
2380 || gimple_code (stmt) == GIMPLE_SWITCH));
2381
2382 if (may_optimize_p)
2383 {
2384 if (gimple_code (stmt) == GIMPLE_CALL)
2385 {
2386 /* Resolve __builtin_constant_p. If it hasn't been
2387 folded to integer_one_node by now, it's fairly
2388 certain that the value simply isn't constant. */
2389 tree callee = gimple_call_fndecl (stmt);
2390 if (callee
2391 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2392 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2393 {
2394 propagate_tree_value_into_stmt (&si, integer_zero_node);
2395 stmt = gsi_stmt (si);
2396 }
2397 }
2398
2399 update_stmt_if_modified (stmt);
2400 eliminate_redundant_computations (&si);
2401 stmt = gsi_stmt (si);
2402
2403 /* Perform simple redundant store elimination. */
2404 if (gimple_assign_single_p (stmt)
2405 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2406 {
2407 tree lhs = gimple_assign_lhs (stmt);
2408 tree rhs = gimple_assign_rhs1 (stmt);
2409 tree cached_lhs;
2410 gimple new_stmt;
2411 if (TREE_CODE (rhs) == SSA_NAME)
2412 {
2413 tree tem = SSA_NAME_VALUE (rhs);
2414 if (tem)
2415 rhs = tem;
2416 }
2417 /* Build a new statement with the RHS and LHS exchanged. */
2418 if (TREE_CODE (rhs) == SSA_NAME)
2419 {
2420 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2421 new_stmt = gimple_build_assign (rhs, lhs);
2422 SSA_NAME_DEF_STMT (rhs) = defstmt;
2423 }
2424 else
2425 new_stmt = gimple_build_assign (rhs, lhs);
2426 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2427 cached_lhs = lookup_avail_expr (new_stmt, false);
2428 if (cached_lhs
2429 && rhs == cached_lhs)
2430 {
2431 basic_block bb = gimple_bb (stmt);
2432 unlink_stmt_vdef (stmt);
2433 if (gsi_remove (&si, true))
2434 {
2435 bitmap_set_bit (need_eh_cleanup, bb->index);
2436 if (dump_file && (dump_flags & TDF_DETAILS))
2437 fprintf (dump_file, " Flagged to clear EH edges.\n");
2438 }
2439 release_defs (stmt);
2440 return;
2441 }
2442 }
2443 }
2444
2445 /* Record any additional equivalences created by this statement. */
2446 if (is_gimple_assign (stmt))
2447 record_equivalences_from_stmt (stmt, may_optimize_p);
2448
2449 /* If STMT is a COND_EXPR and it was modified, then we may know
2450 where it goes. If that is the case, then mark the CFG as altered.
2451
2452 This will cause us to later call remove_unreachable_blocks and
2453 cleanup_tree_cfg when it is safe to do so. It is not safe to
2454 clean things up here since removal of edges and such can trigger
2455 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2456 the manager.
2457
2458 That's all fine and good, except that once SSA_NAMEs are released
2459 to the manager, we must not call create_ssa_name until all references
2460 to released SSA_NAMEs have been eliminated.
2461
2462 All references to the deleted SSA_NAMEs can not be eliminated until
2463 we remove unreachable blocks.
2464
2465 We can not remove unreachable blocks until after we have completed
2466 any queued jump threading.
2467
2468 We can not complete any queued jump threads until we have taken
2469 appropriate variables out of SSA form. Taking variables out of
2470 SSA form can call create_ssa_name and thus we lose.
2471
2472 Ultimately I suspect we're going to need to change the interface
2473 into the SSA_NAME manager. */
2474 if (gimple_modified_p (stmt) || modified_p)
2475 {
2476 tree val = NULL;
2477
2478 update_stmt_if_modified (stmt);
2479
2480 if (gimple_code (stmt) == GIMPLE_COND)
2481 val = fold_binary_loc (gimple_location (stmt),
2482 gimple_cond_code (stmt), boolean_type_node,
2483 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2484 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2485 val = gimple_switch_index (stmt);
2486
2487 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2488 cfg_altered = true;
2489
2490 /* If we simplified a statement in such a way as to be shown that it
2491 cannot trap, update the eh information and the cfg to match. */
2492 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2493 {
2494 bitmap_set_bit (need_eh_cleanup, bb->index);
2495 if (dump_file && (dump_flags & TDF_DETAILS))
2496 fprintf (dump_file, " Flagged to clear EH edges.\n");
2497 }
2498 }
2499 }
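/* The redundant store elimination above can be seen on a sequence
   like (illustrative):

     *p_1 = x_2;
     ...              (no intervening virtual definition)
     *p_1 = x_2;

   The second store builds the artificial statement "x_2 = *p_1",
   finds the entry recorded for the first store, and since the cached
   LHS matches the value being stored, the second store is removed.  */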
2500
2501 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2502 If found, return its LHS. Otherwise insert STMT in the table and
2503 return NULL_TREE.
2504
2505 Also, when an expression is first inserted in the table, it is
2506 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2507 we finish processing this block and its children. */
2508
2509 static tree
2510 lookup_avail_expr (gimple stmt, bool insert)
2511 {
2512 expr_hash_elt **slot;
2513 tree lhs;
2514 tree temp;
2515 struct expr_hash_elt element;
2516
2517 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2518 if (gimple_code (stmt) == GIMPLE_PHI)
2519 lhs = gimple_phi_result (stmt);
2520 else
2521 lhs = gimple_get_lhs (stmt);
2522
2523 initialize_hash_element (stmt, lhs, &element);
2524
2525 if (dump_file && (dump_flags & TDF_DETAILS))
2526 {
2527 fprintf (dump_file, "LKUP ");
2528 print_expr_hash_elt (dump_file, &element);
2529 }
2530
2531 /* Don't bother remembering constant assignments and copy operations.
2532 Constants and copy operations are handled by the constant/copy propagator
2533 in optimize_stmt. */
2534 if (element.expr.kind == EXPR_SINGLE
2535 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2536 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2537 return NULL_TREE;
2538
2539 /* Finally try to find the expression in the main expression hash table. */
2540 slot = avail_exprs.find_slot_with_hash (&element, element.hash,
2541 (insert ? INSERT : NO_INSERT));
2542 if (slot == NULL)
2543 {
2544 free_expr_hash_elt_contents (&element);
2545 return NULL_TREE;
2546 }
2547 else if (*slot == NULL)
2548 {
2549 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2550 *element2 = element;
2551 element2->stamp = element2;
2552 *slot = element2;
2553
2554 if (dump_file && (dump_flags & TDF_DETAILS))
2555 {
2556 fprintf (dump_file, "2>>> ");
2557 print_expr_hash_elt (dump_file, element2);
2558 }
2559
2560 avail_exprs_stack.safe_push (element2);
2561 return NULL_TREE;
2562 }
2563 else
2564 free_expr_hash_elt_contents (&element);
2565
2566 /* Extract the LHS of the assignment so that it can be used as the current
2567 definition of another variable. */
2568 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2569
2570 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2571 use the value from the const_and_copies table. */
2572 if (TREE_CODE (lhs) == SSA_NAME)
2573 {
2574 temp = SSA_NAME_VALUE (lhs);
2575 if (temp)
2576 lhs = temp;
2577 }
2578
2579 if (dump_file && (dump_flags & TDF_DETAILS))
2580 {
2581 fprintf (dump_file, "FIND: ");
2582 print_generic_expr (dump_file, lhs, 0);
2583 fprintf (dump_file, "\n");
2584 }
2585
2586 return lhs;
2587 }
2588
2589 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2590 for expressions using the code of the expression and the SSA numbers of
2591 its operands. */
2592
2593 static hashval_t
2594 avail_expr_hash (const void *p)
2595 {
2596 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2597 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2598 tree vuse;
2599 hashval_t val = 0;
2600
2601 val = iterative_hash_hashable_expr (expr, val);
2602
2603 /* If the hash table entry is not associated with a statement, then we
2604 can just hash the expression and not worry about virtual operands
2605 and such. */
2606 if (!stmt)
2607 return val;
2608
2609 /* Add the SSA version number of the vuse operand. This is important
2610 because compound variables like arrays are not renamed in the
2611 operands. Rather, the rename is done on the virtual variable
2612 representing all the elements of the array. */
2613 if ((vuse = gimple_vuse (stmt)))
2614 val = iterative_hash_expr (vuse, val);
2615
2616 return val;
2617 }
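/* Mixing the VUSE into the hash keeps loads that are separated by a
   store distinct.  Illustrative sketch:

     x_1 = a[i_2];
     a[j_3] = 0;      (creates a new virtual definition)
     y_4 = a[i_2];    (different VUSE, hence a different hash value)

   so the second load does not blindly reuse the entry for the first
   even though the textual expression is identical.  */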
2618
2619 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2620 up degenerate PHIs created by or exposed by jump threading. */
2621
2622 /* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
2623 NULL. */
2624
2625 tree
2626 degenerate_phi_result (gimple phi)
2627 {
2628 tree lhs = gimple_phi_result (phi);
2629 tree val = NULL;
2630 size_t i;
2631
2632 /* Ignoring arguments which are the same as LHS, if all the remaining
2633 arguments are the same, then the PHI is a degenerate and has the
2634 value of that common argument. */
2635 for (i = 0; i < gimple_phi_num_args (phi); i++)
2636 {
2637 tree arg = gimple_phi_arg_def (phi, i);
2638
2639 if (arg == lhs)
2640 continue;
2641 else if (!arg)
2642 break;
2643 else if (!val)
2644 val = arg;
2645 else if (arg == val)
2646 continue;
2647 /* We open-code some of operand_equal_p's checks, not only to speed
2648 things up, but also to avoid crashing when dereferencing the type of
2649 a released SSA name. */
2650 else if (TREE_CODE (val) != TREE_CODE (arg)
2651 || TREE_CODE (val) == SSA_NAME
2652 || !operand_equal_p (arg, val, 0))
2653 break;
2654 }
2655 return (i == gimple_phi_num_args (phi) ? val : NULL);
2656 }
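/* Example (illustrative): for

     x_5 = PHI <x_3(2), x_3(3), x_5(4)>

   the self-referencing x_5 argument is ignored and the remaining
   arguments agree, so x_3 is returned; for x_5 = PHI <x_3, x_4>
   NULL is returned.  */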
2657
2658 /* Given a statement STMT, which is either a PHI node or an assignment,
2659 remove it from the IL. */
2660
2661 static void
2662 remove_stmt_or_phi (gimple stmt)
2663 {
2664 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2665
2666 if (gimple_code (stmt) == GIMPLE_PHI)
2667 remove_phi_node (&gsi, true);
2668 else
2669 {
2670 gsi_remove (&gsi, true);
2671 release_defs (stmt);
2672 }
2673 }
2674
2675 /* Given a statement STMT, which is either a PHI node or an assignment,
2676 return the "rhs" of the node. In the case of a non-degenerate
2677 PHI, NULL is returned. */
2678
2679 static tree
2680 get_rhs_or_phi_arg (gimple stmt)
2681 {
2682 if (gimple_code (stmt) == GIMPLE_PHI)
2683 return degenerate_phi_result (stmt);
2684 else if (gimple_assign_single_p (stmt))
2685 return gimple_assign_rhs1 (stmt);
2686 else
2687 gcc_unreachable ();
2688 }
2689
2690
2691 /* Given a statement STMT, which is either a PHI node or an assignment,
2692 return the "lhs" of the node. */
2693
2694 static tree
2695 get_lhs_or_phi_result (gimple stmt)
2696 {
2697 if (gimple_code (stmt) == GIMPLE_PHI)
2698 return gimple_phi_result (stmt);
2699 else if (is_gimple_assign (stmt))
2700 return gimple_assign_lhs (stmt);
2701 else
2702 gcc_unreachable ();
2703 }
2704
2705 /* Propagate RHS into all uses of LHS (when possible).
2706
2707 RHS and LHS are derived from STMT, which is passed in solely so
2708 that we can remove it if propagation is successful.
2709
2710 When propagating into a PHI node or into a statement which turns
2711 into a trivial copy or constant initialization, set the
2712 appropriate bit in INTERESTING_NAMEs so that we will visit those
2713 nodes as well in an effort to pick up secondary optimization
2714 opportunities. */
2715
2716 static void
2717 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2718 {
2719 /* First verify that propagation is valid and isn't going to move a
2720 loop variant variable outside its loop. */
2721 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2722 && (TREE_CODE (rhs) != SSA_NAME
2723 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2724 && may_propagate_copy (lhs, rhs)
2725 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2726 {
2727 use_operand_p use_p;
2728 imm_use_iterator iter;
2729 gimple use_stmt;
2730 bool all = true;
2731
2732 /* Dump details. */
2733 if (dump_file && (dump_flags & TDF_DETAILS))
2734 {
2735 fprintf (dump_file, " Replacing '");
2736 print_generic_expr (dump_file, lhs, dump_flags);
2737 fprintf (dump_file, "' with %s '",
2738 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2739 print_generic_expr (dump_file, rhs, dump_flags);
2740 fprintf (dump_file, "'\n");
2741 }
2742
2743 /* Walk over every use of LHS and try to replace the use with RHS.
2744 At this point the only reason why such a propagation would not
2745 be successful would be if the use occurs in an ASM_EXPR. */
2746 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2747 {
2748 /* Leave debug stmts alone. If we succeed in propagating
2749 all non-debug uses, we'll drop the DEF, and propagation
2750 into debug stmts will occur then. */
2751 if (gimple_debug_bind_p (use_stmt))
2752 continue;
2753
2754 /* It's not always safe to propagate into an ASM_EXPR. */
2755 if (gimple_code (use_stmt) == GIMPLE_ASM
2756 && ! may_propagate_copy_into_asm (lhs))
2757 {
2758 all = false;
2759 continue;
2760 }
2761
2762 /* It's not ok to propagate into the definition stmt of RHS.
2763 <bb 9>:
2764 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2765 g_67.1_6 = prephitmp.12_36;
2766 goto <bb 9>;
2767 While this is strictly all dead code, we do not want to
2768 deal with it here. */
2769 if (TREE_CODE (rhs) == SSA_NAME
2770 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2771 {
2772 all = false;
2773 continue;
2774 }
2775
2776 /* Dump details. */
2777 if (dump_file && (dump_flags & TDF_DETAILS))
2778 {
2779 fprintf (dump_file, " Original statement:");
2780 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2781 }
2782
2783 /* Propagate the RHS into this use of the LHS. */
2784 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2785 propagate_value (use_p, rhs);
2786
2787 /* Special cases to avoid useless calls into the folding
2788 routines, operand scanning, etc.
2789
2790 Propagation into a PHI may cause the PHI to become
2791 a degenerate, so mark the PHI as interesting. No other
2792 actions are necessary. */
2793 if (gimple_code (use_stmt) == GIMPLE_PHI)
2794 {
2795 tree result;
2796
2797 /* Dump details. */
2798 if (dump_file && (dump_flags & TDF_DETAILS))
2799 {
2800 fprintf (dump_file, " Updated statement:");
2801 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2802 }
2803
2804 result = get_lhs_or_phi_result (use_stmt);
2805 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2806 continue;
2807 }
2808
2809 /* From this point onward we are propagating into a
2810 real statement. Folding may (or may not) be possible,
2811 we may expose new operands, expose dead EH edges,
2812 etc. */
2813 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2814 cannot fold a call that simplifies to a constant,
2815 because the GIMPLE_CALL must be replaced by a
2816 GIMPLE_ASSIGN, and there is no way to effect such a
2817 transformation in-place. We might want to consider
2818 using the more general fold_stmt here. */
2819 {
2820 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2821 fold_stmt_inplace (&gsi);
2822 }
2823
2824 /* Sometimes propagation can expose new operands to the
2825 renamer. */
2826 update_stmt (use_stmt);
2827
2828 /* Dump details. */
2829 if (dump_file && (dump_flags & TDF_DETAILS))
2830 {
2831 fprintf (dump_file, " Updated statement:");
2832 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2833 }
2834
2835 /* If we replaced a variable index with a constant, then
2836 we would need to update the invariant flag for ADDR_EXPRs. */
2837 if (gimple_assign_single_p (use_stmt)
2838 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2839 recompute_tree_invariant_for_addr_expr
2840 (gimple_assign_rhs1 (use_stmt));
2841
2842 /* If we cleaned up EH information from the statement,
2843 mark its containing block as needing EH cleanups. */
2844 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2845 {
2846 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2847 if (dump_file && (dump_flags & TDF_DETAILS))
2848 fprintf (dump_file, " Flagged to clear EH edges.\n");
2849 }
2850
2851 /* Propagation may expose new trivial copy/constant propagation
2852 opportunities. */
2853 if (gimple_assign_single_p (use_stmt)
2854 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2855 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2856 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2857 {
2858 tree result = get_lhs_or_phi_result (use_stmt);
2859 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2860 }
2861
2862 /* Propagation into these nodes may make certain edges in
2863 the CFG unexecutable. We want to identify them, as PHI nodes
2864 at the destination of those unexecutable edges may become
2865 degenerates. */
2866 else if (gimple_code (use_stmt) == GIMPLE_COND
2867 || gimple_code (use_stmt) == GIMPLE_SWITCH
2868 || gimple_code (use_stmt) == GIMPLE_GOTO)
2869 {
2870 tree val;
2871
2872 if (gimple_code (use_stmt) == GIMPLE_COND)
2873 val = fold_binary_loc (gimple_location (use_stmt),
2874 gimple_cond_code (use_stmt),
2875 boolean_type_node,
2876 gimple_cond_lhs (use_stmt),
2877 gimple_cond_rhs (use_stmt));
2878 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2879 val = gimple_switch_index (use_stmt);
2880 else
2881 val = gimple_goto_dest (use_stmt);
2882
2883 if (val && is_gimple_min_invariant (val))
2884 {
2885 basic_block bb = gimple_bb (use_stmt);
2886 edge te = find_taken_edge (bb, val);
2887 edge_iterator ei;
2888 edge e;
2889 gimple_stmt_iterator gsi, psi;
2890
2891 /* Remove all outgoing edges except TE. */
2892 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2893 {
2894 if (e != te)
2895 {
2896 /* Mark all the PHI nodes at the destination of
2897 the unexecutable edge as interesting. */
2898 for (psi = gsi_start_phis (e->dest);
2899 !gsi_end_p (psi);
2900 gsi_next (&psi))
2901 {
2902 gimple phi = gsi_stmt (psi);
2903
2904 tree result = gimple_phi_result (phi);
2905 int version = SSA_NAME_VERSION (result);
2906
2907 bitmap_set_bit (interesting_names, version);
2908 }
2909
2910 te->probability += e->probability;
2911
2912 te->count += e->count;
2913 remove_edge (e);
2914 cfg_altered = true;
2915 }
2916 else
2917 ei_next (&ei);
2918 }
2919
2920 gsi = gsi_last_bb (gimple_bb (use_stmt));
2921 gsi_remove (&gsi, true);
2922
2923 /* And fixup the flags on the single remaining edge. */
2924 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2925 te->flags &= ~EDGE_ABNORMAL;
2926 te->flags |= EDGE_FALLTHRU;
2927 if (te->probability > REG_BR_PROB_BASE)
2928 te->probability = REG_BR_PROB_BASE;
2929 }
2930 }
2931 }
2932
2933 /* Ensure there is nothing else to do. */
2934 gcc_assert (!all || has_zero_uses (lhs));
2935
2936 /* If we were able to propagate away all uses of LHS, then
2937 we can remove STMT. */
2938 if (all)
2939 remove_stmt_or_phi (stmt);
2940 }
2941 }
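/* Sketch of the control statement case above (illustrative): if
   propagation turns

     if (x_1 != 0) goto <bb 3>; else goto <bb 4>;

   into if (0 != 0), the condition folds to false, the now
   unexecutable edge to bb 3 is removed (after marking its PHIs as
   interesting), the GIMPLE_COND itself is deleted, and the surviving
   edge to bb 4 becomes a fallthru.  */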
2942
2943 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2944 a statement that is a trivial copy or constant initialization.
2945
2946 Attempt to eliminate STMT by propagating its RHS into all uses of
2947 its LHS. This may in turn set new bits in INTERESTING_NAMES
2948 for nodes we want to revisit later.
2949
2950 All exit paths should clear INTERESTING_NAMES for the result
2951 of STMT. */
2952
2953 static void
2954 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2955 {
2956 tree lhs = get_lhs_or_phi_result (stmt);
2957 tree rhs;
2958 int version = SSA_NAME_VERSION (lhs);
2959
2960 /* If the LHS of this statement or PHI has no uses, then we can
2961 just eliminate it. This can occur if, for example, the PHI
2962 was created by block duplication due to threading and its only
2963 use was in the conditional at the end of the block which was
2964 deleted. */
2965 if (has_zero_uses (lhs))
2966 {
2967 bitmap_clear_bit (interesting_names, version);
2968 remove_stmt_or_phi (stmt);
2969 return;
2970 }
2971
2972 /* Get the RHS of the assignment or PHI node if the PHI is a
2973 degenerate. */
2974 rhs = get_rhs_or_phi_arg (stmt);
2975 if (!rhs)
2976 {
2977 bitmap_clear_bit (interesting_names, version);
2978 return;
2979 }
2980
2981 if (!virtual_operand_p (lhs))
2982 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2983 else
2984 {
2985 gimple use_stmt;
2986 imm_use_iterator iter;
2987 use_operand_p use_p;
2988 /* For virtual operands we have to propagate into all uses as
2989 otherwise we will create overlapping live ranges. */
2990 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2991 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2992 SET_USE (use_p, rhs);
2993 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2994 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2995 remove_stmt_or_phi (stmt);
2996 }
2997
2998 /* Note that STMT may well have been deleted by now, so do
2999 not access it; instead use the saved version number to clear
3000 STMT's entry in the worklist. */
3001 bitmap_clear_bit (interesting_names, version);
3002 }
3003
3004 /* The first phase in degenerate PHI elimination.
3005
3006 Eliminate the degenerate PHIs in BB, then recurse on the
3007 dominator children of BB. */
3008
3009 static void
3010 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3011 {
3012 gimple_stmt_iterator gsi;
3013 basic_block son;
3014
3015 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3016 {
3017 gimple phi = gsi_stmt (gsi);
3018
3019 eliminate_const_or_copy (phi, interesting_names);
3020 }
3021
3022 /* Recurse into the dominator children of BB. */
3023 for (son = first_dom_son (CDI_DOMINATORS, bb);
3024 son;
3025 son = next_dom_son (CDI_DOMINATORS, son))
3026 eliminate_degenerate_phis_1 (son, interesting_names);
3027 }
3028
3029
3030 /* A very simple pass to eliminate degenerate PHI nodes from the
3031 IL. This is meant to be fast enough to run several
3032 times in the optimization pipeline.
3033
3034 Certain optimizations, particularly those which duplicate blocks
3035 or remove edges from the CFG, can create or expose PHIs which are
3036 trivial copies or constant initializations.
3037
3038 While we could pick up these optimizations in DOM or with the
3039 combination of copy-prop and CCP, those solutions are far too
3040 heavy-weight for our needs.
3041
3042 This implementation has two phases so that we can efficiently
3043 eliminate the first order degenerate PHIs and second order
3044 degenerate PHIs.
3045
3046 The first phase performs a dominator walk to identify and eliminate
3047 the vast majority of the degenerate PHIs. When a degenerate PHI
3048 is identified and eliminated any affected statements or PHIs
3049 are put on a worklist.
3050
3051 The second phase eliminates degenerate PHIs and trivial copies
3052 or constant initializations using the worklist. This is how we
3053 pick up the secondary optimization opportunities with minimal
3054 cost. */
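/* For instance (illustrative), eliminating the degenerate
   x_2 = PHI <x_1(3), x_1(4)> may rewrite a use in
   y_6 = PHI <x_2(5), x_1(6)> into y_6 = PHI <x_1(5), x_1(6)>,
   a second order degenerate that the worklist phase then picks up
   through INTERESTING_NAMES.  */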
3055
3056 static unsigned int
3057 eliminate_degenerate_phis (void)
3058 {
3059 bitmap interesting_names;
3060 bitmap interesting_names1;
3061
3062 /* Bitmap of blocks which need EH information updated. We can not
3063 update it on the fly as doing so invalidates the dominator tree. */
3064 need_eh_cleanup = BITMAP_ALLOC (NULL);
3065
3066 /* INTERESTING_NAMES is effectively our worklist, indexed by
3067 SSA_NAME_VERSION.
3068
3069 A set bit indicates that the statement or PHI node which
3070 defines the SSA_NAME should be (re)examined to determine if
3071 it has become a degenerate PHI or trivial const/copy propagation
3072 opportunity.
3073
3074 Experiments have shown we generally get better compilation
3075 time behavior with bitmaps rather than sbitmaps. */
3076 interesting_names = BITMAP_ALLOC (NULL);
3077 interesting_names1 = BITMAP_ALLOC (NULL);
3078
3079 calculate_dominance_info (CDI_DOMINATORS);
3080 cfg_altered = false;
3081
3082 /* First phase. Eliminate degenerate PHIs via a dominator
3083 walk of the CFG.
3084
3085 Experiments have indicated that we generally get better
3086 compile-time behavior by visiting blocks in the first
3087 phase in dominator order. Presumably this is because walking
3088 in dominator order leaves fewer PHIs for later examination
3089 by the worklist phase. */
3090 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
3091
3092 /* Second phase. Eliminate second order degenerate PHIs as well
3093 as trivial copies or constant initializations identified by
3094 the first phase or this phase. Basically we keep iterating
3095 until our set of INTERESTING_NAMEs is empty. */
3096 while (!bitmap_empty_p (interesting_names))
3097 {
3098 unsigned int i;
3099 bitmap_iterator bi;
3100
3101 /* EXECUTE_IF_SET_IN_BITMAP does not like having its bitmap
3102 changed during the loop. Copy it to another bitmap and
3103 use that. */
3104 bitmap_copy (interesting_names1, interesting_names);
3105
3106 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3107 {
3108 tree name = ssa_name (i);
3109
3110 /* Ignore SSA_NAMEs that have been released because
3111 their defining statement was deleted (unreachable). */
3112 if (name)
3113 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3114 interesting_names);
3115 }
3116 }
3117
3118 if (cfg_altered)
3119 {
3120 free_dominance_info (CDI_DOMINATORS);
3121 /* If we changed the CFG, schedule loops for fixup by cfgcleanup. */
3122 if (current_loops)
3123 loops_state_set (LOOPS_NEED_FIXUP);
3124 }
3125
3126 /* Propagation of const and copies may make some EH edges dead. Purge
3127 such edges from the CFG as needed. */
3128 if (!bitmap_empty_p (need_eh_cleanup))
3129 {
3130 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3131 BITMAP_FREE (need_eh_cleanup);
3132 }
3133
3134 BITMAP_FREE (interesting_names);
3135 BITMAP_FREE (interesting_names1);
3136 return 0;
3137 }
3138
3139 namespace {
3140
3141 const pass_data pass_data_phi_only_cprop =
3142 {
3143 GIMPLE_PASS, /* type */
3144 "phicprop", /* name */
3145 OPTGROUP_NONE, /* optinfo_flags */
3146 true, /* has_gate */
3147 true, /* has_execute */
3148 TV_TREE_PHI_CPROP, /* tv_id */
3149 ( PROP_cfg | PROP_ssa ), /* properties_required */
3150 0, /* properties_provided */
3151 0, /* properties_destroyed */
3152 0, /* todo_flags_start */
3153 ( TODO_cleanup_cfg | TODO_verify_ssa
3154 | TODO_verify_stmts
3155 | TODO_update_ssa ), /* todo_flags_finish */
3156 };
3157
3158 class pass_phi_only_cprop : public gimple_opt_pass
3159 {
3160 public:
3161 pass_phi_only_cprop(gcc::context *ctxt)
3162 : gimple_opt_pass(pass_data_phi_only_cprop, ctxt)
3163 {}
3164
3165 /* opt_pass methods: */
3166 opt_pass * clone () { return new pass_phi_only_cprop (ctxt_); }
3167 bool gate () { return gate_dominator (); }
3168 unsigned int execute () { return eliminate_degenerate_phis (); }
3169
3170 }; // class pass_phi_only_cprop
3171
3172 } // anon namespace
3173
3174 gimple_opt_pass *
3175 make_pass_phi_only_cprop (gcc::context *ctxt)
3176 {
3177 return new pass_phi_only_cprop (ctxt);
3178 }