gcc/tree-ssa-dom.c
1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2015 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "cfghooks.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "hard-reg-set.h"
29 #include "ssa.h"
30 #include "alias.h"
31 #include "fold-const.h"
32 #include "stor-layout.h"
33 #include "flags.h"
34 #include "tm_p.h"
35 #include "cfganal.h"
36 #include "cfgloop.h"
37 #include "gimple-pretty-print.h"
38 #include "internal-fn.h"
39 #include "gimple-fold.h"
40 #include "tree-eh.h"
41 #include "gimple-iterator.h"
42 #include "tree-cfg.h"
43 #include "tree-into-ssa.h"
44 #include "domwalk.h"
45 #include "tree-pass.h"
46 #include "tree-ssa-propagate.h"
47 #include "tree-ssa-threadupdate.h"
48 #include "langhooks.h"
49 #include "params.h"
50 #include "tree-ssa-scopedtables.h"
51 #include "tree-ssa-threadedge.h"
52 #include "tree-ssa-dom.h"
53 #include "gimplify.h"
54 #include "tree-cfgcleanup.h"
55
56 /* This file implements optimizations on the dominator tree. */
57
58 /* Representation of a "naked" right-hand-side expression, to be used
59 in recording available expressions in the expression hash table. */
60
61 enum expr_kind
62 {
63 EXPR_SINGLE,
64 EXPR_UNARY,
65 EXPR_BINARY,
66 EXPR_TERNARY,
67 EXPR_CALL,
68 EXPR_PHI
69 };
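/* EXPR_SINGLE denotes a bare operand used as the whole RHS (for example a
   constant or an SSA_NAME); EXPR_UNARY, EXPR_BINARY and EXPR_TERNARY carry
   a tree code plus one, two or three operands; EXPR_CALL and EXPR_PHI carry
   the call statement or the PHI arguments, respectively (see hashable_expr
   below).  */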
70
71 struct hashable_expr
72 {
73 tree type;
74 enum expr_kind kind;
75 union {
76 struct { tree rhs; } single;
77 struct { enum tree_code op; tree opnd; } unary;
78 struct { enum tree_code op; tree opnd0, opnd1; } binary;
79 struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
80 struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
81 struct { size_t nargs; tree *args; } phi;
82 } ops;
83 };
84
85 /* Structure for recording known values of a conditional expression
86 at the exits from its block. */
87
88 typedef struct cond_equivalence_s
89 {
90 struct hashable_expr cond;
91 tree value;
92 } cond_equivalence;
93
94
95 /* Structure for recording edge equivalences as well as any pending
96 edge redirections during the dominator optimizer.
97
98 Computing and storing the edge equivalences instead of creating
99 them on-demand can save significant amounts of time, particularly
100 for pathological cases involving switch statements.
101
102 These structures live for a single iteration of the dominator
103 optimizer in the edge's AUX field. At the end of an iteration we
104 free each of these structures and update the AUX field to point
105 to any requested redirection target (the code for updating the
106 CFG and SSA graph for edge redirection expects redirection edge
 107       targets to be in the AUX field for each edge).  */
108
109 struct edge_info
110 {
111 /* If this edge creates a simple equivalence, the LHS and RHS of
112 the equivalence will be stored here. */
113 tree lhs;
114 tree rhs;
115
116 /* Traversing an edge may also indicate one or more particular conditions
117 are true or false. */
118 vec<cond_equivalence> cond_equivalences;
119 };
120
 121 /* Stack of available expressions in AVAIL_EXPRS.  Each block pushes any
122 expressions it enters into the hash table along with a marker entry
123 (null). When we finish processing the block, we pop off entries and
124 remove the expressions from the global hash table until we hit the
125 marker. */
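/* Each stack entry is a pair: the element that was entered into the hash
   table and the element it displaced there (or NULL if nothing was
   displaced).  Unwinding restores the displaced element; a (NULL, NULL)
   pair serves as the per-block marker.  */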
126 typedef struct expr_hash_elt * expr_hash_elt_t;
127
128 static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
129
130 /* Structure for entries in the expression hash table. */
131
132 struct expr_hash_elt
133 {
134 /* The value (lhs) of this expression. */
135 tree lhs;
136
137 /* The expression (rhs) we want to record. */
138 struct hashable_expr expr;
139
140 /* The virtual operand associated with the nearest dominating stmt
141 loading from or storing to expr. */
142 tree vop;
143
144 /* The hash value for RHS. */
145 hashval_t hash;
146
147 /* A unique stamp, typically the address of the hash
148 element itself, used in removing entries from the table. */
149 struct expr_hash_elt *stamp;
150 };
151
152 /* Hashtable helpers. */
153
154 static bool hashable_expr_equal_p (const struct hashable_expr *,
155 const struct hashable_expr *);
156 static void free_expr_hash_elt (void *);
157
158 struct expr_elt_hasher : pointer_hash <expr_hash_elt>
159 {
160 static inline hashval_t hash (const value_type &);
161 static inline bool equal (const value_type &, const compare_type &);
162 static inline void remove (value_type &);
163 };
164
165 inline hashval_t
166 expr_elt_hasher::hash (const value_type &p)
167 {
168 return p->hash;
169 }
170
171 inline bool
172 expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
173 {
174 const struct hashable_expr *expr1 = &p1->expr;
175 const struct expr_hash_elt *stamp1 = p1->stamp;
176 const struct hashable_expr *expr2 = &p2->expr;
177 const struct expr_hash_elt *stamp2 = p2->stamp;
178
179 /* This case should apply only when removing entries from the table. */
180 if (stamp1 == stamp2)
181 return true;
182
183 if (p1->hash != p2->hash)
184 return false;
185
186 /* In case of a collision, both RHS have to be identical and have the
187 same VUSE operands. */
188 if (hashable_expr_equal_p (expr1, expr2)
189 && types_compatible_p (expr1->type, expr2->type))
190 return true;
191
192 return false;
193 }
194
195 /* Delete an expr_hash_elt and reclaim its storage. */
196
197 inline void
198 expr_elt_hasher::remove (value_type &element)
199 {
200 free_expr_hash_elt (element);
201 }
202
203 /* Hash table with expressions made available during the renaming process.
204 When an assignment of the form X_i = EXPR is found, the statement is
205 stored in this table. If the same expression EXPR is later found on the
206 RHS of another statement, it is replaced with X_i (thus performing
207 global redundancy elimination). Similarly as we pass through conditionals
208 we record the conditional itself as having either a true or false value
209 in this table. */
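/* For example, after seeing

     x_1 = a_2 + b_3;

   a later occurrence of a_2 + b_3 on the RHS of an assignment is replaced
   by x_1.  */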
210 static hash_table<expr_elt_hasher> *avail_exprs;
211
212 /* Unwindable const/copy equivalences. */
213 static const_and_copies *const_and_copies;
214
215 /* Track whether or not we have changed the control flow graph. */
216 static bool cfg_altered;
217
218 /* Bitmap of blocks that have had EH statements cleaned. We should
219 remove their dead edges eventually. */
220 static bitmap need_eh_cleanup;
221 static vec<gimple> need_noreturn_fixup;
222
223 /* Statistics for dominator optimizations. */
224 struct opt_stats_d
225 {
226 long num_stmts;
227 long num_exprs_considered;
228 long num_re;
229 long num_const_prop;
230 long num_copy_prop;
231 };
232
233 static struct opt_stats_d opt_stats;
234
235 /* Local functions. */
236 static void optimize_stmt (basic_block, gimple_stmt_iterator);
237 static tree lookup_avail_expr (gimple, bool);
238 static hashval_t avail_expr_hash (const void *);
239 static void htab_statistics (FILE *,
240 const hash_table<expr_elt_hasher> &);
241 static void record_cond (cond_equivalence *);
242 static void record_equality (tree, tree);
243 static void record_equivalences_from_phis (basic_block);
244 static void record_equivalences_from_incoming_edge (basic_block);
245 static void eliminate_redundant_computations (gimple_stmt_iterator *);
246 static void record_equivalences_from_stmt (gimple, int);
247 static void remove_local_expressions_from_table (void);
248 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
249
250
251 /* Given a statement STMT, initialize the hash table element pointed to
252 by ELEMENT. */
253
254 static void
255 initialize_hash_element (gimple stmt, tree lhs,
256 struct expr_hash_elt *element)
257 {
258 enum gimple_code code = gimple_code (stmt);
259 struct hashable_expr *expr = &element->expr;
260
261 if (code == GIMPLE_ASSIGN)
262 {
263 enum tree_code subcode = gimple_assign_rhs_code (stmt);
264
265 switch (get_gimple_rhs_class (subcode))
266 {
267 case GIMPLE_SINGLE_RHS:
268 expr->kind = EXPR_SINGLE;
269 expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
270 expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
271 break;
272 case GIMPLE_UNARY_RHS:
273 expr->kind = EXPR_UNARY;
274 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
275 if (CONVERT_EXPR_CODE_P (subcode))
276 subcode = NOP_EXPR;
277 expr->ops.unary.op = subcode;
278 expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
279 break;
280 case GIMPLE_BINARY_RHS:
281 expr->kind = EXPR_BINARY;
282 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
283 expr->ops.binary.op = subcode;
284 expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
285 expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
286 break;
287 case GIMPLE_TERNARY_RHS:
288 expr->kind = EXPR_TERNARY;
289 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
290 expr->ops.ternary.op = subcode;
291 expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
292 expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
293 expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
294 break;
295 default:
296 gcc_unreachable ();
297 }
298 }
299 else if (code == GIMPLE_COND)
300 {
301 expr->type = boolean_type_node;
302 expr->kind = EXPR_BINARY;
303 expr->ops.binary.op = gimple_cond_code (stmt);
304 expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
305 expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
306 }
307 else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
308 {
309 size_t nargs = gimple_call_num_args (call_stmt);
310 size_t i;
311
312 gcc_assert (gimple_call_lhs (call_stmt));
313
314 expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
315 expr->kind = EXPR_CALL;
316 expr->ops.call.fn_from = call_stmt;
317
318 if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
319 expr->ops.call.pure = true;
320 else
321 expr->ops.call.pure = false;
322
323 expr->ops.call.nargs = nargs;
324 expr->ops.call.args = XCNEWVEC (tree, nargs);
325 for (i = 0; i < nargs; i++)
326 expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
327 }
328 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
329 {
330 expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
331 expr->kind = EXPR_SINGLE;
332 expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
333 }
334 else if (code == GIMPLE_GOTO)
335 {
336 expr->type = TREE_TYPE (gimple_goto_dest (stmt));
337 expr->kind = EXPR_SINGLE;
338 expr->ops.single.rhs = gimple_goto_dest (stmt);
339 }
340 else if (code == GIMPLE_PHI)
341 {
342 size_t nargs = gimple_phi_num_args (stmt);
343 size_t i;
344
345 expr->type = TREE_TYPE (gimple_phi_result (stmt));
346 expr->kind = EXPR_PHI;
347 expr->ops.phi.nargs = nargs;
348 expr->ops.phi.args = XCNEWVEC (tree, nargs);
349
350 for (i = 0; i < nargs; i++)
351 expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
352 }
353 else
354 gcc_unreachable ();
355
356 element->lhs = lhs;
357 element->vop = gimple_vuse (stmt);
358 element->hash = avail_expr_hash (element);
359 element->stamp = element;
360 }
361
362 /* Given a conditional expression COND as a tree, initialize
363 a hashable_expr expression EXPR. The conditional must be a
364 comparison or logical negation. A constant or a variable is
365 not permitted. */
366
367 static void
368 initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
369 {
370 expr->type = boolean_type_node;
371
372 if (COMPARISON_CLASS_P (cond))
373 {
374 expr->kind = EXPR_BINARY;
375 expr->ops.binary.op = TREE_CODE (cond);
376 expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
377 expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
378 }
379 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
380 {
381 expr->kind = EXPR_UNARY;
382 expr->ops.unary.op = TRUTH_NOT_EXPR;
383 expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
384 }
385 else
386 gcc_unreachable ();
387 }
388
389 /* Given a hashable_expr expression EXPR and an LHS,
390 initialize the hash table element pointed to by ELEMENT. */
391
392 static void
393 initialize_hash_element_from_expr (struct hashable_expr *expr,
394 tree lhs,
395 struct expr_hash_elt *element)
396 {
397 element->expr = *expr;
398 element->lhs = lhs;
399 element->vop = NULL_TREE;
400 element->hash = avail_expr_hash (element);
401 element->stamp = element;
402 }
403
404 /* Compare two hashable_expr structures for equivalence.
 405    They are considered equivalent when the expressions
 406    they denote must necessarily be equal.  The logic is intended
 407    to follow that of operand_equal_p in fold-const.c.  */
408
409 static bool
410 hashable_expr_equal_p (const struct hashable_expr *expr0,
411 const struct hashable_expr *expr1)
412 {
413 tree type0 = expr0->type;
414 tree type1 = expr1->type;
415
 416   /* If exactly one of the types is NULL, the expressions cannot be equal.  */
417 if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
418 return false;
419
 420   /* If the types don't have the same signedness, precision, and mode,
 421      then we can't consider them equal.  */
422 if (type0 != type1
423 && (TREE_CODE (type0) == ERROR_MARK
424 || TREE_CODE (type1) == ERROR_MARK
425 || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
426 || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
427 || TYPE_MODE (type0) != TYPE_MODE (type1)))
428 return false;
429
430 if (expr0->kind != expr1->kind)
431 return false;
432
433 switch (expr0->kind)
434 {
435 case EXPR_SINGLE:
436 return operand_equal_p (expr0->ops.single.rhs,
437 expr1->ops.single.rhs, 0);
438
439 case EXPR_UNARY:
440 if (expr0->ops.unary.op != expr1->ops.unary.op)
441 return false;
442
443 if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
444 || expr0->ops.unary.op == NON_LVALUE_EXPR)
445 && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
446 return false;
447
448 return operand_equal_p (expr0->ops.unary.opnd,
449 expr1->ops.unary.opnd, 0);
450
451 case EXPR_BINARY:
452 if (expr0->ops.binary.op != expr1->ops.binary.op)
453 return false;
454
455 if (operand_equal_p (expr0->ops.binary.opnd0,
456 expr1->ops.binary.opnd0, 0)
457 && operand_equal_p (expr0->ops.binary.opnd1,
458 expr1->ops.binary.opnd1, 0))
459 return true;
460
461 /* For commutative ops, allow the other order. */
462 return (commutative_tree_code (expr0->ops.binary.op)
463 && operand_equal_p (expr0->ops.binary.opnd0,
464 expr1->ops.binary.opnd1, 0)
465 && operand_equal_p (expr0->ops.binary.opnd1,
466 expr1->ops.binary.opnd0, 0));
467
468 case EXPR_TERNARY:
469 if (expr0->ops.ternary.op != expr1->ops.ternary.op
470 || !operand_equal_p (expr0->ops.ternary.opnd2,
471 expr1->ops.ternary.opnd2, 0))
472 return false;
473
474 if (operand_equal_p (expr0->ops.ternary.opnd0,
475 expr1->ops.ternary.opnd0, 0)
476 && operand_equal_p (expr0->ops.ternary.opnd1,
477 expr1->ops.ternary.opnd1, 0))
478 return true;
479
480 /* For commutative ops, allow the other order. */
481 return (commutative_ternary_tree_code (expr0->ops.ternary.op)
482 && operand_equal_p (expr0->ops.ternary.opnd0,
483 expr1->ops.ternary.opnd1, 0)
484 && operand_equal_p (expr0->ops.ternary.opnd1,
485 expr1->ops.ternary.opnd0, 0));
486
487 case EXPR_CALL:
488 {
489 size_t i;
490
491 /* If the calls are to different functions, then they
492 clearly cannot be equal. */
493 if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
494 expr1->ops.call.fn_from))
495 return false;
496
497 if (! expr0->ops.call.pure)
498 return false;
499
500 if (expr0->ops.call.nargs != expr1->ops.call.nargs)
501 return false;
502
503 for (i = 0; i < expr0->ops.call.nargs; i++)
504 if (! operand_equal_p (expr0->ops.call.args[i],
505 expr1->ops.call.args[i], 0))
506 return false;
507
508 if (stmt_could_throw_p (expr0->ops.call.fn_from))
509 {
510 int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
511 int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
512 if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
513 return false;
514 }
515
516 return true;
517 }
518
519 case EXPR_PHI:
520 {
521 size_t i;
522
523 if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
524 return false;
525
526 for (i = 0; i < expr0->ops.phi.nargs; i++)
527 if (! operand_equal_p (expr0->ops.phi.args[i],
528 expr1->ops.phi.args[i], 0))
529 return false;
530
531 return true;
532 }
533
534 default:
535 gcc_unreachable ();
536 }
537 }
538
539 /* Generate a hash value for a pair of expressions. This can be used
540 iteratively by passing a previous result in HSTATE.
541
542 The same hash value is always returned for a given pair of expressions,
543 regardless of the order in which they are presented. This is useful in
544 hashing the operands of commutative functions. */
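/* E.g. a_1 + b_2 and b_2 + a_1 hash identically: the operand hashes are
   combined with hash::add_commutative, which is insensitive to their
   order.  */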
545
546 namespace inchash
547 {
548
549 static void
550 add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
551 {
552 hash one, two;
553
554 inchash::add_expr (t1, one);
555 inchash::add_expr (t2, two);
556 hstate.add_commutative (one, two);
557 }
558
559 /* Compute a hash value for a hashable_expr value EXPR and a
560 previously accumulated hash value VAL. If two hashable_expr
561 values compare equal with hashable_expr_equal_p, they must
562 hash to the same value, given an identical value of VAL.
563 The logic is intended to follow inchash::add_expr in tree.c. */
564
565 static void
566 add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
567 {
568 switch (expr->kind)
569 {
570 case EXPR_SINGLE:
571 inchash::add_expr (expr->ops.single.rhs, hstate);
572 break;
573
574 case EXPR_UNARY:
575 hstate.add_object (expr->ops.unary.op);
576
577 /* Make sure to include signedness in the hash computation.
578 Don't hash the type, that can lead to having nodes which
579 compare equal according to operand_equal_p, but which
580 have different hash codes. */
581 if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
582 || expr->ops.unary.op == NON_LVALUE_EXPR)
583 hstate.add_int (TYPE_UNSIGNED (expr->type));
584
585 inchash::add_expr (expr->ops.unary.opnd, hstate);
586 break;
587
588 case EXPR_BINARY:
589 hstate.add_object (expr->ops.binary.op);
590 if (commutative_tree_code (expr->ops.binary.op))
591 inchash::add_expr_commutative (expr->ops.binary.opnd0,
592 expr->ops.binary.opnd1, hstate);
593 else
594 {
595 inchash::add_expr (expr->ops.binary.opnd0, hstate);
596 inchash::add_expr (expr->ops.binary.opnd1, hstate);
597 }
598 break;
599
600 case EXPR_TERNARY:
601 hstate.add_object (expr->ops.ternary.op);
602 if (commutative_ternary_tree_code (expr->ops.ternary.op))
603 inchash::add_expr_commutative (expr->ops.ternary.opnd0,
604 expr->ops.ternary.opnd1, hstate);
605 else
606 {
607 inchash::add_expr (expr->ops.ternary.opnd0, hstate);
608 inchash::add_expr (expr->ops.ternary.opnd1, hstate);
609 }
610 inchash::add_expr (expr->ops.ternary.opnd2, hstate);
611 break;
612
613 case EXPR_CALL:
614 {
615 size_t i;
616 enum tree_code code = CALL_EXPR;
617 gcall *fn_from;
618
619 hstate.add_object (code);
620 fn_from = expr->ops.call.fn_from;
621 if (gimple_call_internal_p (fn_from))
622 hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
623 else
624 inchash::add_expr (gimple_call_fn (fn_from), hstate);
625 for (i = 0; i < expr->ops.call.nargs; i++)
626 inchash::add_expr (expr->ops.call.args[i], hstate);
627 }
628 break;
629
630 case EXPR_PHI:
631 {
632 size_t i;
633
634 for (i = 0; i < expr->ops.phi.nargs; i++)
635 inchash::add_expr (expr->ops.phi.args[i], hstate);
636 }
637 break;
638
639 default:
640 gcc_unreachable ();
641 }
642 }
643
644 }
645
646 /* Print a diagnostic dump of an expression hash table entry. */
647
648 static void
649 print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
650 {
651 fprintf (stream, "STMT ");
652
653 if (element->lhs)
654 {
655 print_generic_expr (stream, element->lhs, 0);
656 fprintf (stream, " = ");
657 }
658
659 switch (element->expr.kind)
660 {
661 case EXPR_SINGLE:
662 print_generic_expr (stream, element->expr.ops.single.rhs, 0);
663 break;
664
665 case EXPR_UNARY:
666 fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
667 print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
668 break;
669
670 case EXPR_BINARY:
671 print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
672 fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
673 print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
674 break;
675
676 case EXPR_TERNARY:
677 fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
678 print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
679 fputs (", ", stream);
680 print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
681 fputs (", ", stream);
682 print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
683 fputs (">", stream);
684 break;
685
686 case EXPR_CALL:
687 {
688 size_t i;
689 size_t nargs = element->expr.ops.call.nargs;
690 gcall *fn_from;
691
692 fn_from = element->expr.ops.call.fn_from;
693 if (gimple_call_internal_p (fn_from))
694 fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
695 stream);
696 else
697 print_generic_expr (stream, gimple_call_fn (fn_from), 0);
698 fprintf (stream, " (");
699 for (i = 0; i < nargs; i++)
700 {
701 print_generic_expr (stream, element->expr.ops.call.args[i], 0);
702 if (i + 1 < nargs)
703 fprintf (stream, ", ");
704 }
705 fprintf (stream, ")");
706 }
707 break;
708
709 case EXPR_PHI:
710 {
711 size_t i;
712 size_t nargs = element->expr.ops.phi.nargs;
713
714 fprintf (stream, "PHI <");
715 for (i = 0; i < nargs; i++)
716 {
717 print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
718 if (i + 1 < nargs)
719 fprintf (stream, ", ");
720 }
721 fprintf (stream, ">");
722 }
723 break;
724 }
725
726 if (element->vop)
727 {
728 fprintf (stream, " with ");
729 print_generic_expr (stream, element->vop, 0);
730 }
731
732 fprintf (stream, "\n");
733 }
734
735 /* Delete variable sized pieces of the expr_hash_elt ELEMENT. */
736
737 static void
738 free_expr_hash_elt_contents (struct expr_hash_elt *element)
739 {
740 if (element->expr.kind == EXPR_CALL)
741 free (element->expr.ops.call.args);
742 else if (element->expr.kind == EXPR_PHI)
743 free (element->expr.ops.phi.args);
744 }
745
746 /* Delete an expr_hash_elt and reclaim its storage. */
747
748 static void
749 free_expr_hash_elt (void *elt)
750 {
751 struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
752 free_expr_hash_elt_contents (element);
753 free (element);
754 }
755
756 /* Allocate an EDGE_INFO for edge E and attach it to E.
757 Return the new EDGE_INFO structure. */
758
759 static struct edge_info *
760 allocate_edge_info (edge e)
761 {
762 struct edge_info *edge_info;
763
764 edge_info = XCNEW (struct edge_info);
765
766 e->aux = edge_info;
767 return edge_info;
768 }
769
770 /* Free all EDGE_INFO structures associated with edges in the CFG.
771 If a particular edge can be threaded, copy the redirection
772 target from the EDGE_INFO structure into the edge's AUX field
773 as required by code to update the CFG and SSA graph for
774 jump threading. */
775
776 static void
777 free_all_edge_infos (void)
778 {
779 basic_block bb;
780 edge_iterator ei;
781 edge e;
782
783 FOR_EACH_BB_FN (bb, cfun)
784 {
785 FOR_EACH_EDGE (e, ei, bb->preds)
786 {
787 struct edge_info *edge_info = (struct edge_info *) e->aux;
788
789 if (edge_info)
790 {
791 edge_info->cond_equivalences.release ();
792 free (edge_info);
793 e->aux = NULL;
794 }
795 }
796 }
797 }
798
799 /* Build a cond_equivalence record indicating that the comparison
 800    CODE holds between operands OP0 and OP1 and push it onto *P.  */
801
802 static void
803 build_and_record_new_cond (enum tree_code code,
804 tree op0, tree op1,
805 vec<cond_equivalence> *p,
806 bool val = true)
807 {
808 cond_equivalence c;
809 struct hashable_expr *cond = &c.cond;
810
811 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
812
813 cond->type = boolean_type_node;
814 cond->kind = EXPR_BINARY;
815 cond->ops.binary.op = code;
816 cond->ops.binary.opnd0 = op0;
817 cond->ops.binary.opnd1 = op1;
818
819 c.value = val ? boolean_true_node : boolean_false_node;
820 p->safe_push (c);
821 }
822
823 /* Record that COND is true and INVERTED is false into the edge information
824 structure. Also record that any conditions dominated by COND are true
825 as well.
826
827 For example, if a < b is true, then a <= b must also be true. */
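/* Concretely, for integral operands a < b also records a <= b and a != b
   as true and a == b as false; for floating-point operands ORDERED_EXPR
   and LTGT_EXPR are recorded as well.  */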
828
829 static void
830 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
831 {
832 tree op0, op1;
833 cond_equivalence c;
834
835 if (!COMPARISON_CLASS_P (cond))
836 return;
837
838 op0 = TREE_OPERAND (cond, 0);
839 op1 = TREE_OPERAND (cond, 1);
840
841 switch (TREE_CODE (cond))
842 {
843 case LT_EXPR:
844 case GT_EXPR:
845 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
846 {
847 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
848 &edge_info->cond_equivalences);
849 build_and_record_new_cond (LTGT_EXPR, op0, op1,
850 &edge_info->cond_equivalences);
851 }
852
853 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
854 ? LE_EXPR : GE_EXPR),
855 op0, op1, &edge_info->cond_equivalences);
856 build_and_record_new_cond (NE_EXPR, op0, op1,
857 &edge_info->cond_equivalences);
858 build_and_record_new_cond (EQ_EXPR, op0, op1,
859 &edge_info->cond_equivalences, false);
860 break;
861
862 case GE_EXPR:
863 case LE_EXPR:
864 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
865 {
866 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
867 &edge_info->cond_equivalences);
868 }
869 break;
870
871 case EQ_EXPR:
872 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
873 {
874 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
875 &edge_info->cond_equivalences);
876 }
877 build_and_record_new_cond (LE_EXPR, op0, op1,
878 &edge_info->cond_equivalences);
879 build_and_record_new_cond (GE_EXPR, op0, op1,
880 &edge_info->cond_equivalences);
881 break;
882
883 case UNORDERED_EXPR:
884 build_and_record_new_cond (NE_EXPR, op0, op1,
885 &edge_info->cond_equivalences);
886 build_and_record_new_cond (UNLE_EXPR, op0, op1,
887 &edge_info->cond_equivalences);
888 build_and_record_new_cond (UNGE_EXPR, op0, op1,
889 &edge_info->cond_equivalences);
890 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
891 &edge_info->cond_equivalences);
892 build_and_record_new_cond (UNLT_EXPR, op0, op1,
893 &edge_info->cond_equivalences);
894 build_and_record_new_cond (UNGT_EXPR, op0, op1,
895 &edge_info->cond_equivalences);
896 break;
897
898 case UNLT_EXPR:
899 case UNGT_EXPR:
900 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
901 ? UNLE_EXPR : UNGE_EXPR),
902 op0, op1, &edge_info->cond_equivalences);
903 build_and_record_new_cond (NE_EXPR, op0, op1,
904 &edge_info->cond_equivalences);
905 break;
906
907 case UNEQ_EXPR:
908 build_and_record_new_cond (UNLE_EXPR, op0, op1,
909 &edge_info->cond_equivalences);
910 build_and_record_new_cond (UNGE_EXPR, op0, op1,
911 &edge_info->cond_equivalences);
912 break;
913
914 case LTGT_EXPR:
915 build_and_record_new_cond (NE_EXPR, op0, op1,
916 &edge_info->cond_equivalences);
917 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
918 &edge_info->cond_equivalences);
919 break;
920
921 default:
922 break;
923 }
924
925 /* Now store the original true and false conditions into the first
926 two slots. */
927 initialize_expr_from_cond (cond, &c.cond);
928 c.value = boolean_true_node;
929 edge_info->cond_equivalences.safe_push (c);
930
931 /* It is possible for INVERTED to be the negation of a comparison,
932 and not a valid RHS or GIMPLE_COND condition. This happens because
933 invert_truthvalue may return such an expression when asked to invert
934 a floating-point comparison. These comparisons are not assumed to
935 obey the trichotomy law. */
936 initialize_expr_from_cond (inverted, &c.cond);
937 c.value = boolean_false_node;
938 edge_info->cond_equivalences.safe_push (c);
939 }
940
941 /* We have finished optimizing BB, record any information implied by
942 taking a specific outgoing edge from BB. */
943
944 static void
945 record_edge_info (basic_block bb)
946 {
947 gimple_stmt_iterator gsi = gsi_last_bb (bb);
948 struct edge_info *edge_info;
949
950 if (! gsi_end_p (gsi))
951 {
952 gimple stmt = gsi_stmt (gsi);
953 location_t loc = gimple_location (stmt);
954
955 if (gimple_code (stmt) == GIMPLE_SWITCH)
956 {
957 gswitch *switch_stmt = as_a <gswitch *> (stmt);
958 tree index = gimple_switch_index (switch_stmt);
959
960 if (TREE_CODE (index) == SSA_NAME)
961 {
962 int i;
963 int n_labels = gimple_switch_num_labels (switch_stmt);
964 tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
965 edge e;
966 edge_iterator ei;
967
968 for (i = 0; i < n_labels; i++)
969 {
970 tree label = gimple_switch_label (switch_stmt, i);
971 basic_block target_bb = label_to_block (CASE_LABEL (label));
972 if (CASE_HIGH (label)
973 || !CASE_LOW (label)
974 || info[target_bb->index])
975 info[target_bb->index] = error_mark_node;
976 else
977 info[target_bb->index] = label;
978 }
979
980 FOR_EACH_EDGE (e, ei, bb->succs)
981 {
982 basic_block target_bb = e->dest;
983 tree label = info[target_bb->index];
984
985 if (label != NULL && label != error_mark_node)
986 {
987 tree x = fold_convert_loc (loc, TREE_TYPE (index),
988 CASE_LOW (label));
989 edge_info = allocate_edge_info (e);
990 edge_info->lhs = index;
991 edge_info->rhs = x;
992 }
993 }
994 free (info);
995 }
996 }
997
998 /* A COND_EXPR may create equivalences too. */
999 if (gimple_code (stmt) == GIMPLE_COND)
1000 {
1001 edge true_edge;
1002 edge false_edge;
1003
1004 tree op0 = gimple_cond_lhs (stmt);
1005 tree op1 = gimple_cond_rhs (stmt);
1006 enum tree_code code = gimple_cond_code (stmt);
1007
1008 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1009
1010 /* Special case comparing booleans against a constant as we
1011              know the value of OP0 on both arms of the branch, i.e., we
1012 can record an equivalence for OP0 rather than COND. */
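/* E.g. for "if (flag_5 == 0)" we record flag_5 = false on the true edge
   and flag_5 = true on the false edge.  */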
1013 if ((code == EQ_EXPR || code == NE_EXPR)
1014 && TREE_CODE (op0) == SSA_NAME
1015 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1016 && is_gimple_min_invariant (op1))
1017 {
1018 if (code == EQ_EXPR)
1019 {
1020 edge_info = allocate_edge_info (true_edge);
1021 edge_info->lhs = op0;
1022 edge_info->rhs = (integer_zerop (op1)
1023 ? boolean_false_node
1024 : boolean_true_node);
1025
1026 edge_info = allocate_edge_info (false_edge);
1027 edge_info->lhs = op0;
1028 edge_info->rhs = (integer_zerop (op1)
1029 ? boolean_true_node
1030 : boolean_false_node);
1031 }
1032 else
1033 {
1034 edge_info = allocate_edge_info (true_edge);
1035 edge_info->lhs = op0;
1036 edge_info->rhs = (integer_zerop (op1)
1037 ? boolean_true_node
1038 : boolean_false_node);
1039
1040 edge_info = allocate_edge_info (false_edge);
1041 edge_info->lhs = op0;
1042 edge_info->rhs = (integer_zerop (op1)
1043 ? boolean_false_node
1044 : boolean_true_node);
1045 }
1046 }
1047 else if (is_gimple_min_invariant (op0)
1048 && (TREE_CODE (op1) == SSA_NAME
1049 || is_gimple_min_invariant (op1)))
1050 {
1051 tree cond = build2 (code, boolean_type_node, op0, op1);
1052 tree inverted = invert_truthvalue_loc (loc, cond);
1053 bool can_infer_simple_equiv
1054 = !(HONOR_SIGNED_ZEROS (op0)
1055 && real_zerop (op0));
1056 struct edge_info *edge_info;
1057
1058 edge_info = allocate_edge_info (true_edge);
1059 record_conditions (edge_info, cond, inverted);
1060
1061 if (can_infer_simple_equiv && code == EQ_EXPR)
1062 {
1063 edge_info->lhs = op1;
1064 edge_info->rhs = op0;
1065 }
1066
1067 edge_info = allocate_edge_info (false_edge);
1068 record_conditions (edge_info, inverted, cond);
1069
1070 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1071 {
1072 edge_info->lhs = op1;
1073 edge_info->rhs = op0;
1074 }
1075 }
1076
1077 else if (TREE_CODE (op0) == SSA_NAME
1078 && (TREE_CODE (op1) == SSA_NAME
1079 || is_gimple_min_invariant (op1)))
1080 {
1081 tree cond = build2 (code, boolean_type_node, op0, op1);
1082 tree inverted = invert_truthvalue_loc (loc, cond);
1083 bool can_infer_simple_equiv
1084 = !(HONOR_SIGNED_ZEROS (op1)
1085 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1086 struct edge_info *edge_info;
1087
1088 edge_info = allocate_edge_info (true_edge);
1089 record_conditions (edge_info, cond, inverted);
1090
1091 if (can_infer_simple_equiv && code == EQ_EXPR)
1092 {
1093 edge_info->lhs = op0;
1094 edge_info->rhs = op1;
1095 }
1096
1097 edge_info = allocate_edge_info (false_edge);
1098 record_conditions (edge_info, inverted, cond);
1099
1100 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1101 {
1102 edge_info->lhs = op0;
1103 edge_info->rhs = op1;
1104 }
1105 }
1106 }
1107
1108 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1109 }
1110 }
1111
1112
1113 class dom_opt_dom_walker : public dom_walker
1114 {
1115 public:
1116 dom_opt_dom_walker (cdi_direction direction)
1117 : dom_walker (direction), m_dummy_cond (NULL) {}
1118
1119 virtual void before_dom_children (basic_block);
1120 virtual void after_dom_children (basic_block);
1121
1122 private:
1123 void thread_across_edge (edge);
1124
1125 gcond *m_dummy_cond;
1126 };
1127
1128 /* Jump threading, redundancy elimination and const/copy propagation.
1129
1130 This pass may expose new symbols that need to be renamed into SSA. For
1131 every new symbol exposed, its corresponding bit will be set in
1132 VARS_TO_RENAME. */
1133
1134 namespace {
1135
1136 const pass_data pass_data_dominator =
1137 {
1138 GIMPLE_PASS, /* type */
1139 "dom", /* name */
1140 OPTGROUP_NONE, /* optinfo_flags */
1141 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
1142 ( PROP_cfg | PROP_ssa ), /* properties_required */
1143 0, /* properties_provided */
1144 0, /* properties_destroyed */
1145 0, /* todo_flags_start */
1146 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
1147 };
1148
1149 class pass_dominator : public gimple_opt_pass
1150 {
1151 public:
1152 pass_dominator (gcc::context *ctxt)
1153 : gimple_opt_pass (pass_data_dominator, ctxt)
1154 {}
1155
1156 /* opt_pass methods: */
1157 opt_pass * clone () { return new pass_dominator (m_ctxt); }
1158 virtual bool gate (function *) { return flag_tree_dom != 0; }
1159 virtual unsigned int execute (function *);
1160
1161 }; // class pass_dominator
1162
1163 unsigned int
1164 pass_dominator::execute (function *fun)
1165 {
1166 memset (&opt_stats, 0, sizeof (opt_stats));
1167
1168 /* Create our hash tables. */
1169 avail_exprs = new hash_table<expr_elt_hasher> (1024);
1170 avail_exprs_stack.create (20);
1171 const_and_copies = new class const_and_copies (dump_file, dump_flags);
1172 need_eh_cleanup = BITMAP_ALLOC (NULL);
1173 need_noreturn_fixup.create (0);
1174
1175 calculate_dominance_info (CDI_DOMINATORS);
1176 cfg_altered = false;
1177
1178 /* We need to know loop structures in order to avoid destroying them
1179 in jump threading. Note that we still can e.g. thread through loop
1180 headers to an exit edge, or through loop header to the loop body, assuming
1181 that we update the loop info.
1182
1183      TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
1184      to several overly conservative bail-outs in jump threading, the test
1185      gcc.dg/tree-ssa/pr21417.c can't be threaded if the loop preheader is
1186      missing.  Once jump threading is improved, LOOPS_HAVE_PREHEADERS will
1187      no longer be needed here.  */
1188 loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
1189
1190 /* Initialize the value-handle array. */
1191 threadedge_initialize_values ();
1192
1193 /* We need accurate information regarding back edges in the CFG
1194 for jump threading; this may include back edges that are not part of
1195 a single loop. */
1196 mark_dfs_back_edges ();
1197
1198 /* Recursively walk the dominator tree optimizing statements. */
1199 dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
1200
1201 {
1202 gimple_stmt_iterator gsi;
1203 basic_block bb;
1204 FOR_EACH_BB_FN (bb, fun)
1205 {
1206 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1207 update_stmt_if_modified (gsi_stmt (gsi));
1208 }
1209 }
1210
1211 /* If we exposed any new variables, go ahead and put them into
1212 SSA form now, before we handle jump threading. This simplifies
1213 interactions between rewriting of _DECL nodes into SSA form
1214 and rewriting SSA_NAME nodes into SSA form after block
1215 duplication and CFG manipulation. */
1216 update_ssa (TODO_update_ssa);
1217
1218 free_all_edge_infos ();
1219
1220 /* Thread jumps, creating duplicate blocks as needed. */
1221 cfg_altered |= thread_through_all_blocks (first_pass_instance);
1222
1223 if (cfg_altered)
1224 free_dominance_info (CDI_DOMINATORS);
1225
1226 /* Removal of statements may make some EH edges dead. Purge
1227 such edges from the CFG as needed. */
1228 if (!bitmap_empty_p (need_eh_cleanup))
1229 {
1230 unsigned i;
1231 bitmap_iterator bi;
1232
1233 /* Jump threading may have created forwarder blocks from blocks
1234 needing EH cleanup; the new successor of these blocks, which
1235 has inherited from the original block, needs the cleanup.
1236 Don't clear bits in the bitmap, as that can break the bitmap
1237 iterator. */
1238 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
1239 {
1240 basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
1241 if (bb == NULL)
1242 continue;
1243 while (single_succ_p (bb)
1244 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
1245 bb = single_succ (bb);
1246 if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
1247 continue;
1248 if ((unsigned) bb->index != i)
1249 bitmap_set_bit (need_eh_cleanup, bb->index);
1250 }
1251
1252 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
1253 bitmap_clear (need_eh_cleanup);
1254 }
1255
1256   /* Fix up stmts that became noreturn calls.  This may require splitting
1257      blocks and thus isn't possible during the dominator walk or before
1258      jump threading has finished.  Do this in reverse order so we don't
1259      inadvertently remove a stmt we want to fix up by first visiting a
1260      dominating call that is now noreturn.  */
1261 while (!need_noreturn_fixup.is_empty ())
1262 {
1263 gimple stmt = need_noreturn_fixup.pop ();
1264 if (dump_file && dump_flags & TDF_DETAILS)
1265 {
1266 fprintf (dump_file, "Fixing up noreturn call ");
1267 print_gimple_stmt (dump_file, stmt, 0, 0);
1268 fprintf (dump_file, "\n");
1269 }
1270 fixup_noreturn_call (stmt);
1271 }
1272
1273 statistics_counter_event (fun, "Redundant expressions eliminated",
1274 opt_stats.num_re);
1275 statistics_counter_event (fun, "Constants propagated",
1276 opt_stats.num_const_prop);
1277 statistics_counter_event (fun, "Copies propagated",
1278 opt_stats.num_copy_prop);
1279
1280 /* Debugging dumps. */
1281 if (dump_file && (dump_flags & TDF_STATS))
1282 dump_dominator_optimization_stats (dump_file);
1283
1284 loop_optimizer_finalize ();
1285
1286 /* Delete our main hashtable. */
1287 delete avail_exprs;
1288 avail_exprs = NULL;
1289
1290 /* Free asserted bitmaps and stacks. */
1291 BITMAP_FREE (need_eh_cleanup);
1292 need_noreturn_fixup.release ();
1293 avail_exprs_stack.release ();
1294 delete const_and_copies;
1295
1296 /* Free the value-handle array. */
1297 threadedge_finalize_values ();
1298
1299 return 0;
1300 }
1301
1302 } // anon namespace
1303
1304 gimple_opt_pass *
1305 make_pass_dominator (gcc::context *ctxt)
1306 {
1307 return new pass_dominator (ctxt);
1308 }
1309
1310
1311 /* Given a conditional statement CONDSTMT, convert the
1312 condition to a canonical form. */
1313
1314 static void
1315 canonicalize_comparison (gcond *condstmt)
1316 {
1317 tree op0;
1318 tree op1;
1319 enum tree_code code;
1320
1321 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
1322
1323 op0 = gimple_cond_lhs (condstmt);
1324 op1 = gimple_cond_rhs (condstmt);
1325
1326 code = gimple_cond_code (condstmt);
1327
1328 /* If it would be profitable to swap the operands, then do so to
1329 canonicalize the statement, enabling better optimization.
1330
1331 By placing canonicalization of such expressions here we
1332 transparently keep statements in canonical form, even
1333 when the statement is modified. */
1334 if (tree_swap_operands_p (op0, op1, false))
1335 {
1336 /* For relationals we need to swap the operands
1337 and change the code. */
1338 if (code == LT_EXPR
1339 || code == GT_EXPR
1340 || code == LE_EXPR
1341 || code == GE_EXPR)
1342 {
1343 code = swap_tree_comparison (code);
1344
1345 gimple_cond_set_code (condstmt, code);
1346 gimple_cond_set_lhs (condstmt, op1);
1347 gimple_cond_set_rhs (condstmt, op0);
1348
1349 update_stmt (condstmt);
1350 }
1351 }
1352 }
1353
1354 /* Unwind AVAIL_EXPRS_STACK, removing from the global hash table AVAIL_EXPRS
1355    the expressions made available in the current block.  Entries are popped
1356    until the (NULL, NULL) marker pushed on entry to the block is reached;
1357    any hash table entry that was displaced by a local expression is
1358    restored along the way.  */
1360
1361 static void
1362 remove_local_expressions_from_table (void)
1363 {
1364 /* Remove all the expressions made available in this block. */
1365 while (avail_exprs_stack.length () > 0)
1366 {
1367 std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
1368 = avail_exprs_stack.pop ();
1369 expr_hash_elt **slot;
1370
1371 if (victim.first == NULL)
1372 break;
1373
1374 /* This must precede the actual removal from the hash table,
1375 as ELEMENT and the table entry may share a call argument
1376 vector which will be freed during removal. */
1377 if (dump_file && (dump_flags & TDF_DETAILS))
1378 {
1379 fprintf (dump_file, "<<<< ");
1380 print_expr_hash_elt (dump_file, victim.first);
1381 }
1382
1383 slot = avail_exprs->find_slot (victim.first, NO_INSERT);
1384 gcc_assert (slot && *slot == victim.first);
1385 if (victim.second != NULL)
1386 {
1387 free_expr_hash_elt (*slot);
1388 *slot = victim.second;
1389 }
1390 else
1391 avail_exprs->clear_slot (slot);
1392 }
1393 }
1394
1395 /* A trivial wrapper so that we can present the generic jump
1396 threading code with a simple API for simplifying statements. */
1397 static tree
1398 simplify_stmt_for_jump_threading (gimple stmt,
1399 gimple within_stmt ATTRIBUTE_UNUSED)
1400 {
1401 return lookup_avail_expr (stmt, false);
1402 }
1403
1404 /* Record into the equivalence tables any equivalences implied by
1405 traversing edge E (which are cached in E->aux).
1406
1407 Callers are responsible for managing the unwinding markers. */
1408 static void
1409 record_temporary_equivalences (edge e)
1410 {
1411 int i;
1412 struct edge_info *edge_info = (struct edge_info *) e->aux;
1413
1414 /* If we have info associated with this edge, record it into
1415 our equivalence tables. */
1416 if (edge_info)
1417 {
1418 cond_equivalence *eq;
1419 tree lhs = edge_info->lhs;
1420 tree rhs = edge_info->rhs;
1421
1422 /* If we have a simple NAME = VALUE equivalence, record it. */
1423 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1424 const_and_copies->record_const_or_copy (lhs, rhs);
1425
1426 /* If we have 0 = COND or 1 = COND equivalences, record them
1427 into our expression hash tables. */
1428 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1429 record_cond (eq);
1430 }
1431 }
1432
1433 /* Wrapper for common code to attempt to thread an edge. For example,
1434 it handles lazily building the dummy condition and the bookkeeping
1435 when jump threading is successful. */
1436
1437 void
1438 dom_opt_dom_walker::thread_across_edge (edge e)
1439 {
1440 if (! m_dummy_cond)
1441 m_dummy_cond =
1442 gimple_build_cond (NE_EXPR,
1443 integer_zero_node, integer_zero_node,
1444 NULL, NULL);
1445
1446 /* Push a marker on both stacks so we can unwind the tables back to their
1447 current state. */
1448 avail_exprs_stack.safe_push
1449 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1450 const_and_copies->push_marker ();
1451
1452 /* Traversing E may result in equivalences we can utilize. */
1453 record_temporary_equivalences (e);
1454
1455 /* With all the edge equivalences in the tables, go ahead and attempt
1456 to thread through E->dest. */
1457 ::thread_across_edge (m_dummy_cond, e, false,
1458 const_and_copies,
1459 simplify_stmt_for_jump_threading);
1460
1461 /* And restore the various tables to their state before
1462 we threaded this edge.
1463
1464 XXX The code in tree-ssa-threadedge.c will restore the state of
1465      the const_and_copies table.  We just have to restore the expression
1466 table. */
1467 remove_local_expressions_from_table ();
1468 }
1469
1470 /* PHI nodes can create equivalences too.
1471
1472 Ignoring any alternatives which are the same as the result, if
1473 all the alternatives are equal, then the PHI node creates an
1474 equivalence. */
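/* For example, x_3 = PHI <y_2, y_2, x_3> makes x_3 equivalent to y_2:
   every alternative that differs from the result is y_2 (after
   valueization), so SSA_NAME_VALUE (x_3) is set to y_2.  */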
1475
1476 static void
1477 record_equivalences_from_phis (basic_block bb)
1478 {
1479 gphi_iterator gsi;
1480
1481 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1482 {
1483 gphi *phi = gsi.phi ();
1484
1485 tree lhs = gimple_phi_result (phi);
1486 tree rhs = NULL;
1487 size_t i;
1488
1489 for (i = 0; i < gimple_phi_num_args (phi); i++)
1490 {
1491 tree t = gimple_phi_arg_def (phi, i);
1492
1493 /* Ignore alternatives which are the same as our LHS. Since
1494              LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
1495 can simply compare pointers. */
1496 if (lhs == t)
1497 continue;
1498
1499 /* Valueize t. */
1500 if (TREE_CODE (t) == SSA_NAME)
1501 {
1502 tree tmp = SSA_NAME_VALUE (t);
1503 t = tmp ? tmp : t;
1504 }
1505
1506 /* If we have not processed an alternative yet, then set
1507 RHS to this alternative. */
1508 if (rhs == NULL)
1509 rhs = t;
1510 /* If we have processed an alternative (stored in RHS), then
1511 see if it is equal to this one. If it isn't, then stop
1512 the search. */
1513 else if (! operand_equal_for_phi_arg_p (rhs, t))
1514 break;
1515 }
1516
1517 /* If we had no interesting alternatives, then all the RHS alternatives
1518 must have been the same as LHS. */
1519 if (!rhs)
1520 rhs = lhs;
1521
1522 /* If we managed to iterate through each PHI alternative without
1523 breaking out of the loop, then we have a PHI which may create
1524 a useful equivalence. We do not need to record unwind data for
1525 this, since this is a true assignment and not an equivalence
1526 inferred from a comparison. All uses of this ssa name are dominated
1527 by this assignment, so unwinding just costs time and space. */
1528 if (i == gimple_phi_num_args (phi)
1529 && may_propagate_copy (lhs, rhs))
1530 set_ssa_name_value (lhs, rhs);
1531 }
1532 }
1533
1534 /* Ignoring loop backedges, if BB has precisely one incoming edge then
1535 return that edge. Otherwise return NULL. */
1536 static edge
1537 single_incoming_edge_ignoring_loop_edges (basic_block bb)
1538 {
1539 edge retval = NULL;
1540 edge e;
1541 edge_iterator ei;
1542
1543 FOR_EACH_EDGE (e, ei, bb->preds)
1544 {
1545 /* A loop back edge can be identified by the destination of
1546 the edge dominating the source of the edge. */
1547 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
1548 continue;
1549
1550 /* If we have already seen a non-loop edge, then we must have
1551 multiple incoming non-loop edges and thus we return NULL. */
1552 if (retval)
1553 return NULL;
1554
1555 /* This is the first non-loop incoming edge we have found. Record
1556 it. */
1557 retval = e;
1558 }
1559
1560 return retval;
1561 }
1562
1563 /* Record any equivalences created by the incoming edge to BB. If BB
1564 has more than one incoming edge, then no equivalence is created. */
1565
1566 static void
1567 record_equivalences_from_incoming_edge (basic_block bb)
1568 {
1569 edge e;
1570 basic_block parent;
1571 struct edge_info *edge_info;
1572
1573 /* If our parent block ended with a control statement, then we may be
1574 able to record some equivalences based on which outgoing edge from
1575 the parent was followed. */
1576 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1577
1578 e = single_incoming_edge_ignoring_loop_edges (bb);
1579
1580 /* If we had a single incoming edge from our parent block, then enter
1581 any data associated with the edge into our tables. */
1582 if (e && e->src == parent)
1583 {
1584 unsigned int i;
1585
1586 edge_info = (struct edge_info *) e->aux;
1587
1588 if (edge_info)
1589 {
1590 tree lhs = edge_info->lhs;
1591 tree rhs = edge_info->rhs;
1592 cond_equivalence *eq;
1593
1594 if (lhs)
1595 record_equality (lhs, rhs);
1596
1597 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
1598 set via a widening type conversion, then we may be able to record
1599 additional equivalences. */
1600 if (lhs
1601 && TREE_CODE (lhs) == SSA_NAME
1602 && is_gimple_constant (rhs)
1603 && TREE_CODE (rhs) == INTEGER_CST)
1604 {
1605 gimple defstmt = SSA_NAME_DEF_STMT (lhs);
1606
1607 if (defstmt
1608 && is_gimple_assign (defstmt)
1609 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
1610 {
1611 tree old_rhs = gimple_assign_rhs1 (defstmt);
1612
1613 /* If the conversion widens the original value and
1614 the constant is in the range of the type of OLD_RHS,
1615 then convert the constant and record the equivalence.
1616
1617 Note that int_fits_type_p does not check the precision
1618 if the upper and lower bounds are OK. */
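/* For example, if s_2 has type short, l_3 = (int) s_2, and this edge
   tells us l_3 == 5, then we can also record s_2 == 5 because 5 fits
   in short.  */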
1619 if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
1620 && (TYPE_PRECISION (TREE_TYPE (lhs))
1621 > TYPE_PRECISION (TREE_TYPE (old_rhs)))
1622 && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
1623 {
1624 tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
1625 record_equality (old_rhs, newval);
1626 }
1627 }
1628 }
1629
1630 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1631 record_cond (eq);
1632 }
1633 }
1634 }
1635
1636 /* Dump SSA statistics on FILE. */
1637
1638 void
1639 dump_dominator_optimization_stats (FILE *file)
1640 {
1641 fprintf (file, "Total number of statements: %6ld\n\n",
1642 opt_stats.num_stmts);
1643 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1644 opt_stats.num_exprs_considered);
1645
1646 fprintf (file, "\nHash table statistics:\n");
1647
1648 fprintf (file, " avail_exprs: ");
1649 htab_statistics (file, *avail_exprs);
1650 }
1651
1652
1653 /* Dump SSA statistics on stderr. */
1654
1655 DEBUG_FUNCTION void
1656 debug_dominator_optimization_stats (void)
1657 {
1658 dump_dominator_optimization_stats (stderr);
1659 }
1660
1661
1662 /* Dump statistics for the hash table HTAB. */
1663
1664 static void
1665 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1666 {
1667 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1668 (long) htab.size (),
1669 (long) htab.elements (),
1670 htab.collisions ());
1671 }
1672
1673
1674 /* Enter condition equivalence into the expression hash table.
1675 This indicates that a conditional expression has a known
1676 boolean value. */
1677
1678 static void
1679 record_cond (cond_equivalence *p)
1680 {
1681 struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1682 expr_hash_elt **slot;
1683
1684 initialize_hash_element_from_expr (&p->cond, p->value, element);
1685
1686 slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
1687 if (*slot == NULL)
1688 {
1689 *slot = element;
1690
1691 if (dump_file && (dump_flags & TDF_DETAILS))
1692 {
1693 fprintf (dump_file, "1>>> ");
1694 print_expr_hash_elt (dump_file, element);
1695 }
1696
1697 avail_exprs_stack.safe_push
1698 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
1699 }
1700 else
1701 free_expr_hash_elt (element);
1702 }
1703
1704 /* Return the loop depth of the basic block of the defining statement of X.
1705 This number should not be treated as absolutely correct because the loop
1706 information may not be completely up-to-date when dom runs. However, it
1707 will be relatively correct, and as more passes are taught to keep loop info
1708 up to date, the result will become more and more accurate. */
1709
1710 static int
1711 loop_depth_of_name (tree x)
1712 {
1713 gimple defstmt;
1714 basic_block defbb;
1715
1716 /* If it's not an SSA_NAME, we have no clue where the definition is. */
1717 if (TREE_CODE (x) != SSA_NAME)
1718 return 0;
1719
1720 /* Otherwise return the loop depth of the defining statement's bb.
1721 Note that there may not actually be a bb for this statement, if the
1722 ssa_name is live on entry. */
1723 defstmt = SSA_NAME_DEF_STMT (x);
1724 defbb = gimple_bb (defstmt);
1725 if (!defbb)
1726 return 0;
1727
1728 return bb_loop_depth (defbb);
1729 }
1730
1731 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1732 This constrains the cases in which we may treat this as assignment. */
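/* E.g. on the true edge of "if (x_1 == 7)" this records x_1 = 7.  When both
   operands are SSA_NAMEs the pair is first canonicalized so that the
   recorded RHS is an invariant or the less deeply nested name where
   possible.  */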
1733
1734 static void
1735 record_equality (tree x, tree y)
1736 {
1737 tree prev_x = NULL, prev_y = NULL;
1738
1739 if (tree_swap_operands_p (x, y, false))
1740 std::swap (x, y);
1741
1742 /* Most of the time tree_swap_operands_p does what we want. But there
1743 are cases where we know one operand is better for copy propagation than
1744 the other. Given no other code cares about ordering of equality
1745 comparison operators for that purpose, we just handle the special cases
1746 here. */
1747 if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
1748 {
1749 /* If one operand is a single use operand, then make it
1750 X. This will preserve its single use properly and if this
1751 conditional is eliminated, the computation of X can be
1752 eliminated as well. */
1753 if (has_single_use (y) && ! has_single_use (x))
1754 std::swap (x, y);
1755 }
1756 if (TREE_CODE (x) == SSA_NAME)
1757 prev_x = SSA_NAME_VALUE (x);
1758 if (TREE_CODE (y) == SSA_NAME)
1759 prev_y = SSA_NAME_VALUE (y);
1760
1761 /* If one of the previous values is invariant, or invariant in more loops
1762 (by depth), then use that.
1763 Otherwise it doesn't matter which value we choose, just so
1764 long as we canonicalize on one value. */
1765 if (is_gimple_min_invariant (y))
1766 ;
1767 else if (is_gimple_min_invariant (x)
1768 /* ??? When threading over backedges the following is important
1769 for correctness. See PR61757. */
1770 || (loop_depth_of_name (x) < loop_depth_of_name (y)))
1771 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1772 else if (prev_x && is_gimple_min_invariant (prev_x))
1773 x = y, y = prev_x, prev_x = prev_y;
1774 else if (prev_y)
1775 y = prev_y;
1776
1777 /* After the swapping, we must have one SSA_NAME. */
1778 if (TREE_CODE (x) != SSA_NAME)
1779 return;
1780
1781 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1782 variable compared against zero. If we're honoring signed zeros,
1783 then we cannot record this value unless we know that the value is
1784 nonzero. */
1785 if (HONOR_SIGNED_ZEROS (x)
1786 && (TREE_CODE (y) != REAL_CST
1787 || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1788 return;
1789
1790 const_and_copies->record_const_or_copy (x, y, prev_x);
1791 }
1792
1793 /* Returns true when STMT is a simple iv increment. It detects the
1794 following situation:
1795
1796 i_1 = phi (..., i_2)
1797 i_2 = i_1 +/- ... */
1798
1799 bool
1800 simple_iv_increment_p (gimple stmt)
1801 {
1802 enum tree_code code;
1803 tree lhs, preinc;
1804 gimple phi;
1805 size_t i;
1806
1807 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1808 return false;
1809
1810 lhs = gimple_assign_lhs (stmt);
1811 if (TREE_CODE (lhs) != SSA_NAME)
1812 return false;
1813
1814 code = gimple_assign_rhs_code (stmt);
1815 if (code != PLUS_EXPR
1816 && code != MINUS_EXPR
1817 && code != POINTER_PLUS_EXPR)
1818 return false;
1819
1820 preinc = gimple_assign_rhs1 (stmt);
1821 if (TREE_CODE (preinc) != SSA_NAME)
1822 return false;
1823
1824 phi = SSA_NAME_DEF_STMT (preinc);
1825 if (gimple_code (phi) != GIMPLE_PHI)
1826 return false;
1827
1828 for (i = 0; i < gimple_phi_num_args (phi); i++)
1829 if (gimple_phi_arg_def (phi, i) == lhs)
1830 return true;
1831
1832 return false;
1833 }
1834
1835 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1836 known value for that SSA_NAME (or NULL if no value is known).
1837
1838 Propagate values from CONST_AND_COPIES into the PHI nodes of the
1839 successors of BB. */
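/* E.g. if the table says x_4 is 3 and a successor block has the PHI
   argument x_4 on the edge from BB, that argument is replaced with 3
   (subject to may_propagate_copy).  */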
1840
1841 static void
1842 cprop_into_successor_phis (basic_block bb)
1843 {
1844 edge e;
1845 edge_iterator ei;
1846
1847 FOR_EACH_EDGE (e, ei, bb->succs)
1848 {
1849 int indx;
1850 gphi_iterator gsi;
1851
1852 /* If this is an abnormal edge, then we do not want to copy propagate
1853 into the PHI alternative associated with this edge. */
1854 if (e->flags & EDGE_ABNORMAL)
1855 continue;
1856
1857 gsi = gsi_start_phis (e->dest);
1858 if (gsi_end_p (gsi))
1859 continue;
1860
1861 /* We may have an equivalence associated with this edge. While
1862 we cannot propagate it into non-dominated blocks, we can
1863 propagate it into PHIs in non-dominated blocks. */
1864
1865 /* Push the unwind marker so we can reset the const and copies
1866 table back to its original state after processing this edge. */
1867 const_and_copies->push_marker ();
1868
1869 /* Extract and record any simple NAME = VALUE equivalences.
1870
1871 Don't bother with [01] = COND equivalences, they're not useful
1872 here. */
1873 struct edge_info *edge_info = (struct edge_info *) e->aux;
1874 if (edge_info)
1875 {
1876 tree lhs = edge_info->lhs;
1877 tree rhs = edge_info->rhs;
1878
1879 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1880 const_and_copies->record_const_or_copy (lhs, rhs);
1881 }
1882
1883 indx = e->dest_idx;
1884 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1885 {
1886 tree new_val;
1887 use_operand_p orig_p;
1888 tree orig_val;
1889 gphi *phi = gsi.phi ();
1890
1891 /* The alternative may be associated with a constant, so verify
1892 it is an SSA_NAME before doing anything with it. */
1893 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1894 orig_val = get_use_from_ptr (orig_p);
1895 if (TREE_CODE (orig_val) != SSA_NAME)
1896 continue;
1897
1898 /* If we have *ORIG_P in our constant/copy table, then replace
1899 ORIG_P with its value in our constant/copy table. */
1900 new_val = SSA_NAME_VALUE (orig_val);
1901 if (new_val
1902 && new_val != orig_val
1903 && (TREE_CODE (new_val) == SSA_NAME
1904 || is_gimple_min_invariant (new_val))
1905 && may_propagate_copy (orig_val, new_val))
1906 propagate_value (orig_p, new_val);
1907 }
1908
1909 const_and_copies->pop_to_marker ();
1910 }
1911 }
1912
1913 void
1914 dom_opt_dom_walker::before_dom_children (basic_block bb)
1915 {
1916 gimple_stmt_iterator gsi;
1917
1918 if (dump_file && (dump_flags & TDF_DETAILS))
1919 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1920
1921 /* Push a marker on the stacks of local information so that we know how
1922 far to unwind when we finalize this block. */
1923 avail_exprs_stack.safe_push
1924 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1925 const_and_copies->push_marker ();
1926
1927 record_equivalences_from_incoming_edge (bb);
1928
1929 /* PHI nodes can create equivalences too. */
1930 record_equivalences_from_phis (bb);
1931
1932 /* Create equivalences from redundant PHIs. PHIs are only truly
1933 redundant when they exist in the same block, so push another
1934 marker and unwind right afterwards. */
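/* For example (illustrative GIMPLE), two PHIs in the same block with
   matching arguments,

     x_3 = PHI <a_1 (2), b_2 (3)>
     y_4 = PHI <a_1 (2), b_2 (3)>

   let us record y_4 as equivalent to x_3; later copy propagation can
   then kill the redundant PHI.  */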
1935 avail_exprs_stack.safe_push
1936 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1937 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1938 eliminate_redundant_computations (&gsi);
1939 remove_local_expressions_from_table ();
1940
1941 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1942 optimize_stmt (bb, gsi);
1943
1944 /* Now prepare to process dominated blocks. */
1945 record_edge_info (bb);
1946 cprop_into_successor_phis (bb);
1947 }
1948
1949 /* We have finished processing the dominator children of BB; perform
1950 any finalization actions in preparation for leaving this node in
1951 the dominator tree. */
1952
1953 void
1954 dom_opt_dom_walker::after_dom_children (basic_block bb)
1955 {
1956 gimple last;
1957
1958 /* If we have an outgoing edge to a block with multiple incoming and
1959 outgoing edges, then we may be able to thread the edge, i.e., we
1960 may be able to statically determine which of the outgoing edges
1961 will be traversed when the incoming edge from BB is traversed. */
1962 if (single_succ_p (bb)
1963 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1964 && potentially_threadable_block (single_succ (bb)))
1965 {
1966 thread_across_edge (single_succ_edge (bb));
1967 }
1968 else if ((last = last_stmt (bb))
1969 && gimple_code (last) == GIMPLE_COND
1970 && EDGE_COUNT (bb->succs) == 2
1971 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1972 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1973 {
1974 edge true_edge, false_edge;
1975
1976 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1977
1978 /* Only try to thread the edge if it reaches a target block with
1979 more than one predecessor and more than one successor. */
1980 if (potentially_threadable_block (true_edge->dest))
1981 thread_across_edge (true_edge);
1982
1983 /* Similarly for the ELSE arm. */
1984 if (potentially_threadable_block (false_edge->dest))
1985 thread_across_edge (false_edge);
1986
1987 }
1988
1989 /* These remove expressions local to BB from the tables. */
1990 remove_local_expressions_from_table ();
1991 const_and_copies->pop_to_marker ();
1992 }
1993
1994 /* Search for redundant computations in STMT. If any are found, then
1995 replace them with the variable holding the result of the computation.
1996
1997 If safe, record this expression into the available expression hash
1998 table. */
1999
2000 static void
2001 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2002 {
2003 tree expr_type;
2004 tree cached_lhs;
2005 tree def;
2006 bool insert = true;
2007 bool assigns_var_p = false;
2008
2009 gimple stmt = gsi_stmt (*gsi);
2010
2011 if (gimple_code (stmt) == GIMPLE_PHI)
2012 def = gimple_phi_result (stmt);
2013 else
2014 def = gimple_get_lhs (stmt);
2015
2016 /* Certain expressions on the RHS can be optimized away, but can not
2017 themselves be entered into the hash tables. */
2018 if (! def
2019 || TREE_CODE (def) != SSA_NAME
2020 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2021 || gimple_vdef (stmt)
2022 /* Do not record equivalences for increments of ivs. This would create
2023 overlapping live ranges for a very questionable gain. */
2024 || simple_iv_increment_p (stmt))
2025 insert = false;
2026
2027 /* Check if the expression has been computed before. */
2028 cached_lhs = lookup_avail_expr (stmt, insert);
2029
2030 opt_stats.num_exprs_considered++;
2031
2032 /* Get the type of the expression we are trying to optimize. */
2033 if (is_gimple_assign (stmt))
2034 {
2035 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2036 assigns_var_p = true;
2037 }
2038 else if (gimple_code (stmt) == GIMPLE_COND)
2039 expr_type = boolean_type_node;
2040 else if (is_gimple_call (stmt))
2041 {
2042 gcc_assert (gimple_call_lhs (stmt));
2043 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2044 assigns_var_p = true;
2045 }
2046 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2047 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2048 else if (gimple_code (stmt) == GIMPLE_PHI)
2049 /* We can't propagate into a phi, so the logic below doesn't apply.
2050 Instead record an equivalence between the cached LHS and the
2051 PHI result of this statement, provided they are in the same block.
2052 This should be sufficient to kill the redundant phi. */
2053 {
2054 if (def && cached_lhs)
2055 const_and_copies->record_const_or_copy (def, cached_lhs);
2056 return;
2057 }
2058 else
2059 gcc_unreachable ();
2060
2061 if (!cached_lhs)
2062 return;
2063
2064 /* It is safe to ignore types here since we have already done
2065 type checking in the hashing and equality routines. In fact
2066 type checking here merely gets in the way of constant
2067 propagation. Also, make sure that it is safe to propagate
2068 CACHED_LHS into the expression in STMT. */
2069 if ((TREE_CODE (cached_lhs) != SSA_NAME
2070 && (assigns_var_p
2071 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2072 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2073 {
2074 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2075 || is_gimple_min_invariant (cached_lhs));
2076
2077 if (dump_file && (dump_flags & TDF_DETAILS))
2078 {
2079 fprintf (dump_file, " Replaced redundant expr '");
2080 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2081 fprintf (dump_file, "' with '");
2082 print_generic_expr (dump_file, cached_lhs, dump_flags);
2083 fprintf (dump_file, "'\n");
2084 }
2085
2086 opt_stats.num_re++;
2087
2088 if (assigns_var_p
2089 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2090 cached_lhs = fold_convert (expr_type, cached_lhs);
2091
2092 propagate_tree_value_into_stmt (gsi, cached_lhs);
2093
2094 /* Since it is always necessary to mark the result as modified,
2095 perhaps we should move this into propagate_tree_value_into_stmt
2096 itself. */
2097 gimple_set_modified (gsi_stmt (*gsi), true);
2098 }
2099 }
2100
2101 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2102 the available expressions table or the const_and_copies table.
2103 Detect and record those equivalences. */
2104 /* We handle only very simple copy equivalences here. The heavy
2105 lifting is done by eliminate_redundant_computations. */
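/* For example, for  x_1 = y_2  or  x_1 = 42  (names illustrative) we
   simply record y_2 (respectively 42) as the current value of x_1 via
   set_ssa_name_value; more complex right-hand sides are left to the
   expression tables.  */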
2106
2107 static void
2108 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2109 {
2110 tree lhs;
2111 enum tree_code lhs_code;
2112
2113 gcc_assert (is_gimple_assign (stmt));
2114
2115 lhs = gimple_assign_lhs (stmt);
2116 lhs_code = TREE_CODE (lhs);
2117
2118 if (lhs_code == SSA_NAME
2119 && gimple_assign_single_p (stmt))
2120 {
2121 tree rhs = gimple_assign_rhs1 (stmt);
2122
2123 /* If the RHS of the assignment is a constant or another variable that
2124 may be propagated, register it in the CONST_AND_COPIES table. We
2125 do not need to record unwind data for this, since this is a true
2126 assignment and not an equivalence inferred from a comparison. All
2127 uses of this ssa name are dominated by this assignment, so unwinding
2128 just costs time and space. */
2129 if (may_optimize_p
2130 && (TREE_CODE (rhs) == SSA_NAME
2131 || is_gimple_min_invariant (rhs)))
2132 {
2133 /* Valueize rhs. */
2134 if (TREE_CODE (rhs) == SSA_NAME)
2135 {
2136 tree tmp = SSA_NAME_VALUE (rhs);
2137 rhs = tmp ? tmp : rhs;
2138 }
2139
2140 if (dump_file && (dump_flags & TDF_DETAILS))
2141 {
2142 fprintf (dump_file, "==== ASGN ");
2143 print_generic_expr (dump_file, lhs, 0);
2144 fprintf (dump_file, " = ");
2145 print_generic_expr (dump_file, rhs, 0);
2146 fprintf (dump_file, "\n");
2147 }
2148
2149 set_ssa_name_value (lhs, rhs);
2150 }
2151 }
2152
2153 /* Make sure we can propagate &x + CST. */
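/* For example, for  p_1 = &x + 4  (illustrative) we record as p_1's
   value an equivalent ADDR_EXPR of a MEM_REF, roughly
   &MEM[(void *)&x + 4], an invariant that later uses of p_1 can be
   replaced with.  */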
2154 if (lhs_code == SSA_NAME
2155 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2156 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2157 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2158 {
2159 tree op0 = gimple_assign_rhs1 (stmt);
2160 tree op1 = gimple_assign_rhs2 (stmt);
2161 tree new_rhs
2162 = build_fold_addr_expr (fold_build2 (MEM_REF,
2163 TREE_TYPE (TREE_TYPE (op0)),
2164 unshare_expr (op0),
2165 fold_convert (ptr_type_node,
2166 op1)));
2167 if (dump_file && (dump_flags & TDF_DETAILS))
2168 {
2169 fprintf (dump_file, "==== ASGN ");
2170 print_generic_expr (dump_file, lhs, 0);
2171 fprintf (dump_file, " = ");
2172 print_generic_expr (dump_file, new_rhs, 0);
2173 fprintf (dump_file, "\n");
2174 }
2175
2176 set_ssa_name_value (lhs, new_rhs);
2177 }
2178
2179 /* A memory store, even an aliased store, creates a useful
2180 equivalence. By exchanging the LHS and RHS, creating suitable
2181 vops and recording the result in the available expression table,
2182 we may be able to expose more redundant loads. */
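/* For example, for the store  *p_1 = x_2  (illustrative) we enter the
   artificial statement  x_2 = *p_1  into the table, so that a dominated
   load of *p_1 can be replaced by x_2.  */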
2183 if (!gimple_has_volatile_ops (stmt)
2184 && gimple_references_memory_p (stmt)
2185 && gimple_assign_single_p (stmt)
2186 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2187 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2188 && !is_gimple_reg (lhs))
2189 {
2190 tree rhs = gimple_assign_rhs1 (stmt);
2191 gassign *new_stmt;
2192
2193 /* Build a new statement with the RHS and LHS exchanged. */
2194 if (TREE_CODE (rhs) == SSA_NAME)
2195 {
2196 /* NOTE tuples. The call to gimple_build_assign below replaced
2197 a call to build_gimple_modify_stmt, which did not set the
2198 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2199 may cause an SSA validation failure, as the LHS may be a
2200 default-initialized name and should have no definition. I'm
2201 a bit dubious of this, as the artificial statement that we
2202 generate here may in fact be ill-formed, but it is simply
2203 used as an internal device in this pass, and never becomes
2204 part of the CFG. */
2205 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2206 new_stmt = gimple_build_assign (rhs, lhs);
2207 SSA_NAME_DEF_STMT (rhs) = defstmt;
2208 }
2209 else
2210 new_stmt = gimple_build_assign (rhs, lhs);
2211
2212 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2213
2214 /* Finally enter the statement into the available expression
2215 table. */
2216 lookup_avail_expr (new_stmt, true);
2217 }
2218 }
2219
2220 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2221 CONST_AND_COPIES. */
2222
2223 static void
2224 cprop_operand (gimple stmt, use_operand_p op_p)
2225 {
2226 tree val;
2227 tree op = USE_FROM_PTR (op_p);
2228
2229 /* If the operand has a known constant value or it is known to be a
2230 copy of some other variable, use the value or copy stored in
2231 CONST_AND_COPIES. */
2232 val = SSA_NAME_VALUE (op);
2233 if (val && val != op)
2234 {
2235 /* Do not replace hard register operands in asm statements. */
2236 if (gimple_code (stmt) == GIMPLE_ASM
2237 && !may_propagate_copy_into_asm (op))
2238 return;
2239
2240 /* Certain operands are not allowed to be copy propagated due
2241 to their interaction with exception handling and some GCC
2242 extensions. */
2243 if (!may_propagate_copy (op, val))
2244 return;
2245
2246 /* Do not propagate copies into BIVs.
2247 See PR23821 and PR62217 for how this can disturb IV and
2248 number of iteration analysis. */
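/* For example (illustrative), given the loop-header PHI
   i_1 = PHI <0 (preheader), i_3 (latch)>, replacing a use of i_1 with
   some equivalent copy would obscure the simple i_1/i_3 increment
   pattern that IV and number-of-iterations analysis look for.  */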
2249 if (TREE_CODE (val) != INTEGER_CST)
2250 {
2251 gimple def = SSA_NAME_DEF_STMT (op);
2252 if (gimple_code (def) == GIMPLE_PHI
2253 && gimple_bb (def)->loop_father->header == gimple_bb (def))
2254 return;
2255 }
2256
2257 /* Dump details. */
2258 if (dump_file && (dump_flags & TDF_DETAILS))
2259 {
2260 fprintf (dump_file, " Replaced '");
2261 print_generic_expr (dump_file, op, dump_flags);
2262 fprintf (dump_file, "' with %s '",
2263 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2264 print_generic_expr (dump_file, val, dump_flags);
2265 fprintf (dump_file, "'\n");
2266 }
2267
2268 if (TREE_CODE (val) != SSA_NAME)
2269 opt_stats.num_const_prop++;
2270 else
2271 opt_stats.num_copy_prop++;
2272
2273 propagate_value (op_p, val);
2274
2275 /* And note that we modified this statement. This is now
2276 safe, even if we changed virtual operands since we will
2277 rescan the statement and rewrite its operands again. */
2278 gimple_set_modified (stmt, true);
2279 }
2280 }
2281
2282 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2283 known value for that SSA_NAME (or NULL if no value is known).
2284
2285 Propagate values from CONST_AND_COPIES into the uses, vuses and
2286 vdef_ops of STMT. */
2287
2288 static void
2289 cprop_into_stmt (gimple stmt)
2290 {
2291 use_operand_p op_p;
2292 ssa_op_iter iter;
2293
2294 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2295 cprop_operand (stmt, op_p);
2296 }
2297
2298 /* Optimize the statement pointed to by iterator SI.
2299
2300 We try to perform some simplistic global redundancy elimination and
2301 constant propagation:
2302
2303 1- To detect global redundancy, we keep track of expressions that have
2304 been computed in this block and its dominators. If we find that the
2305 same expression is computed more than once, we eliminate repeated
2306 computations by using the target of the first one.
2307
2308 2- Constant values and copy assignments. This is used to do very
2309 simplistic constant and copy propagation. When a constant or copy
2310 assignment is found, we map the value on the RHS of the assignment to
2311 the variable in the LHS in the CONST_AND_COPIES table. */
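/* For example (illustrative GIMPLE), in

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the second computation is redundant; it is rewritten as  d_4 = a_1,
   assuming nothing between the two statements invalidates the value.  */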
2312
2313 static void
2314 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2315 {
2316 gimple stmt, old_stmt;
2317 bool may_optimize_p;
2318 bool modified_p = false;
2319 bool was_noreturn;
2320
2321 old_stmt = stmt = gsi_stmt (si);
2322 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2323
2324 if (dump_file && (dump_flags & TDF_DETAILS))
2325 {
2326 fprintf (dump_file, "Optimizing statement ");
2327 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2328 }
2329
2330 if (gimple_code (stmt) == GIMPLE_COND)
2331 canonicalize_comparison (as_a <gcond *> (stmt));
2332
2333 update_stmt_if_modified (stmt);
2334 opt_stats.num_stmts++;
2335
2336 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2337 cprop_into_stmt (stmt);
2338
2339 /* If the statement has been modified with constant replacements,
2340 fold its RHS before checking for redundant computations. */
2341 if (gimple_modified_p (stmt))
2342 {
2343 tree rhs = NULL;
2344
2345 /* Try to fold the statement making sure that STMT is kept
2346 up to date. */
2347 if (fold_stmt (&si))
2348 {
2349 stmt = gsi_stmt (si);
2350 gimple_set_modified (stmt, true);
2351
2352 if (dump_file && (dump_flags & TDF_DETAILS))
2353 {
2354 fprintf (dump_file, " Folded to: ");
2355 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2356 }
2357 }
2358
2359 /* We only need to consider cases that can yield a gimple operand. */
2360 if (gimple_assign_single_p (stmt))
2361 rhs = gimple_assign_rhs1 (stmt);
2362 else if (gimple_code (stmt) == GIMPLE_GOTO)
2363 rhs = gimple_goto_dest (stmt);
2364 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2365 /* This should never be an ADDR_EXPR. */
2366 rhs = gimple_switch_index (swtch_stmt);
2367
2368 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2369 recompute_tree_invariant_for_addr_expr (rhs);
2370
2371 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2372 even if fold_stmt updated the stmt already and thus cleared
2373 gimple_modified_p flag on it. */
2374 modified_p = true;
2375 }
2376
2377 /* Check for redundant computations. Do this only for assignments,
2378 calls with a LHS, conditionals and switches without side effects. */
2379 may_optimize_p = (!gimple_has_side_effects (stmt)
2380 && (is_gimple_assign (stmt)
2381 || (is_gimple_call (stmt)
2382 && gimple_call_lhs (stmt) != NULL_TREE)
2383 || gimple_code (stmt) == GIMPLE_COND
2384 || gimple_code (stmt) == GIMPLE_SWITCH));
2385
2386 if (may_optimize_p)
2387 {
2388 if (gimple_code (stmt) == GIMPLE_CALL)
2389 {
2390 /* Resolve __builtin_constant_p. If it hasn't been
2391 folded to integer_one_node by now, it's fairly
2392 certain that the value simply isn't constant. */
2393 tree callee = gimple_call_fndecl (stmt);
2394 if (callee
2395 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2396 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2397 {
2398 propagate_tree_value_into_stmt (&si, integer_zero_node);
2399 stmt = gsi_stmt (si);
2400 }
2401 }
2402
2403 update_stmt_if_modified (stmt);
2404 eliminate_redundant_computations (&si);
2405 stmt = gsi_stmt (si);
2406
2407 /* Perform simple redundant store elimination. */
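/* For example (illustrative), a store  *p_1 = x_2  dominated by a load
   x_2 = *p_1  (or by an identical earlier store) with no intervening
   clobber just writes back the value already in memory and can be
   removed.  */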
2408 if (gimple_assign_single_p (stmt)
2409 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2410 {
2411 tree lhs = gimple_assign_lhs (stmt);
2412 tree rhs = gimple_assign_rhs1 (stmt);
2413 tree cached_lhs;
2414 gassign *new_stmt;
2415 if (TREE_CODE (rhs) == SSA_NAME)
2416 {
2417 tree tem = SSA_NAME_VALUE (rhs);
2418 if (tem)
2419 rhs = tem;
2420 }
2421 /* Build a new statement with the RHS and LHS exchanged. */
2422 if (TREE_CODE (rhs) == SSA_NAME)
2423 {
2424 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2425 new_stmt = gimple_build_assign (rhs, lhs);
2426 SSA_NAME_DEF_STMT (rhs) = defstmt;
2427 }
2428 else
2429 new_stmt = gimple_build_assign (rhs, lhs);
2430 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2431 cached_lhs = lookup_avail_expr (new_stmt, false);
2432 if (cached_lhs
2433 && rhs == cached_lhs)
2434 {
2435 basic_block bb = gimple_bb (stmt);
2436 unlink_stmt_vdef (stmt);
2437 if (gsi_remove (&si, true))
2438 {
2439 bitmap_set_bit (need_eh_cleanup, bb->index);
2440 if (dump_file && (dump_flags & TDF_DETAILS))
2441 fprintf (dump_file, " Flagged to clear EH edges.\n");
2442 }
2443 release_defs (stmt);
2444 return;
2445 }
2446 }
2447 }
2448
2449 /* Record any additional equivalences created by this statement. */
2450 if (is_gimple_assign (stmt))
2451 record_equivalences_from_stmt (stmt, may_optimize_p);
2452
2453 /* If STMT is a COND_EXPR and it was modified, then we may know
2454 where it goes. If that is the case, then mark the CFG as altered.
2455
2456 This will cause us to later call remove_unreachable_blocks and
2457 cleanup_tree_cfg when it is safe to do so. It is not safe to
2458 clean things up here since removal of edges and such can trigger
2459 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2460 the manager.
2461
2462 That's all fine and good, except that once SSA_NAMEs are released
2463 to the manager, we must not call create_ssa_name until all references
2464 to released SSA_NAMEs have been eliminated.
2465
2466 All references to the deleted SSA_NAMEs can not be eliminated until
2467 we remove unreachable blocks.
2468
2469 We can not remove unreachable blocks until after we have completed
2470 any queued jump threading.
2471
2472 We can not complete any queued jump threads until we have taken
2473 appropriate variables out of SSA form. Taking variables out of
2474 SSA form can call create_ssa_name and thus we lose.
2475
2476 Ultimately I suspect we're going to need to change the interface
2477 into the SSA_NAME manager. */
2478 if (gimple_modified_p (stmt) || modified_p)
2479 {
2480 tree val = NULL;
2481
2482 update_stmt_if_modified (stmt);
2483
2484 if (gimple_code (stmt) == GIMPLE_COND)
2485 val = fold_binary_loc (gimple_location (stmt),
2486 gimple_cond_code (stmt), boolean_type_node,
2487 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2488 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2489 val = gimple_switch_index (swtch_stmt);
2490
2491 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2492 cfg_altered = true;
2493
2494 /* If we simplified a statement in such a way that it can be shown
2495 not to trap, update the eh information and the cfg to match. */
2496 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2497 {
2498 bitmap_set_bit (need_eh_cleanup, bb->index);
2499 if (dump_file && (dump_flags & TDF_DETAILS))
2500 fprintf (dump_file, " Flagged to clear EH edges.\n");
2501 }
2502
2503 if (!was_noreturn
2504 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2505 need_noreturn_fixup.safe_push (stmt);
2506 }
2507 }
2508
2509 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2510 the desired memory state. */
2511
2512 static void *
2513 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2514 {
2515 tree vuse2 = (tree) data;
2516 if (vuse1 == vuse2)
2517 return data;
2518
2519 /* This bounds the stmt walks we perform on reference lookups
2520 to O(1) instead of O(N) where N is the number of dominating
2521 stores leading to a candidate. We re-use the SCCVN param
2522 for this as it is basically the same complexity. */
2523 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2524 return (void *)-1;
2525
2526 return NULL;
2527 }
2528
2529 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2530 If found, return its LHS. Otherwise return NULL_TREE, after
2531 inserting STMT in the table if INSERT is true.
2532
2533 Also, when an expression is first inserted in the table, it is
2534 added to AVAIL_EXPRS_STACK, so that it can be removed when
2535 we finish processing this block and its children. */
2536
2537 static tree
2538 lookup_avail_expr (gimple stmt, bool insert)
2539 {
2540 expr_hash_elt **slot;
2541 tree lhs;
2542 tree temp;
2543 struct expr_hash_elt element;
2544
2545 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2546 if (gimple_code (stmt) == GIMPLE_PHI)
2547 lhs = gimple_phi_result (stmt);
2548 else
2549 lhs = gimple_get_lhs (stmt);
2550
2551 initialize_hash_element (stmt, lhs, &element);
2552
2553 if (dump_file && (dump_flags & TDF_DETAILS))
2554 {
2555 fprintf (dump_file, "LKUP ");
2556 print_expr_hash_elt (dump_file, &element);
2557 }
2558
2559 /* Don't bother remembering constant assignments and copy operations.
2560 Constants and copy operations are handled by the constant/copy propagator
2561 in optimize_stmt. */
2562 if (element.expr.kind == EXPR_SINGLE
2563 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2564 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2565 return NULL_TREE;
2566
2567 /* Finally try to find the expression in the main expression hash table. */
2568 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2569 if (slot == NULL)
2570 {
2571 free_expr_hash_elt_contents (&element);
2572 return NULL_TREE;
2573 }
2574 else if (*slot == NULL)
2575 {
2576 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2577 *element2 = element;
2578 element2->stamp = element2;
2579 *slot = element2;
2580
2581 if (dump_file && (dump_flags & TDF_DETAILS))
2582 {
2583 fprintf (dump_file, "2>>> ");
2584 print_expr_hash_elt (dump_file, element2);
2585 }
2586
2587 avail_exprs_stack.safe_push
2588 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2589 return NULL_TREE;
2590 }
2591
2592 /* If we found a redundant memory operation, do an alias walk to
2593 check if we can re-use it. */
2594 if (gimple_vuse (stmt) != (*slot)->vop)
2595 {
2596 tree vuse1 = (*slot)->vop;
2597 tree vuse2 = gimple_vuse (stmt);
2598 /* If we have a load of a register and a candidate in the
2599 hash with vuse1 then try to reach its stmt by walking
2600 up the virtual use-def chain using walk_non_aliased_vuses.
2601 But don't do this when removing expressions from the hash. */
2602 ao_ref ref;
2603 if (!(vuse1 && vuse2
2604 && gimple_assign_single_p (stmt)
2605 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2606 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2607 && walk_non_aliased_vuses (&ref, vuse2,
2608 vuse_eq, NULL, NULL, vuse1) != NULL))
2609 {
2610 if (insert)
2611 {
2612 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2613 *element2 = element;
2614 element2->stamp = element2;
2615
2616 /* Insert the expr into the hash by replacing the current
2617 entry and recording the value to restore in the
2618 avail_exprs_stack. */
2619 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2620 *slot = element2;
2621 if (dump_file && (dump_flags & TDF_DETAILS))
2622 {
2623 fprintf (dump_file, "2>>> ");
2624 print_expr_hash_elt (dump_file, *slot);
2625 }
2626 }
2627 return NULL_TREE;
2628 }
2629 }
2630
2631 free_expr_hash_elt_contents (&element);
2632
2633 /* Extract the LHS of the assignment so that it can be used as the current
2634 definition of another variable. */
2635 lhs = (*slot)->lhs;
2636
2637 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2638 use the value from the const_and_copies table. */
2639 if (TREE_CODE (lhs) == SSA_NAME)
2640 {
2641 temp = SSA_NAME_VALUE (lhs);
2642 if (temp)
2643 lhs = temp;
2644 }
2645
2646 if (dump_file && (dump_flags & TDF_DETAILS))
2647 {
2648 fprintf (dump_file, "FIND: ");
2649 print_generic_expr (dump_file, lhs, 0);
2650 fprintf (dump_file, "\n");
2651 }
2652
2653 return lhs;
2654 }
2655
2656 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2657 for expressions using the code of the expression and the SSA numbers of
2658 its operands. */
2659
2660 static hashval_t
2661 avail_expr_hash (const void *p)
2662 {
2663 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2664 inchash::hash hstate;
2665
2666 inchash::add_hashable_expr (expr, hstate);
2667
2668 return hstate.end ();
2669 }
2670
2671 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2672 up degenerate PHIs created by or exposed by jump threading. */
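/* A degenerate PHI is one whose arguments all have the same value,
   e.g. (illustrative GIMPLE)

     x_3 = PHI <y_2 (3), y_2 (5)>

   which is equivalent to the copy  x_3 = y_2.  */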
2673
2674 /* Given a statement STMT, which is either a PHI node or an assignment,
2675 remove it from the IL. */
2676
2677 static void
2678 remove_stmt_or_phi (gimple stmt)
2679 {
2680 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2681
2682 if (gimple_code (stmt) == GIMPLE_PHI)
2683 remove_phi_node (&gsi, true);
2684 else
2685 {
2686 gsi_remove (&gsi, true);
2687 release_defs (stmt);
2688 }
2689 }
2690
2691 /* Given a statement STMT, which is either a PHI node or an assignment,
2692 return the "rhs" of the node. In the case of a non-degenerate
2693 PHI, NULL is returned. */
2694
2695 static tree
2696 get_rhs_or_phi_arg (gimple stmt)
2697 {
2698 if (gimple_code (stmt) == GIMPLE_PHI)
2699 return degenerate_phi_result (as_a <gphi *> (stmt));
2700 else if (gimple_assign_single_p (stmt))
2701 return gimple_assign_rhs1 (stmt);
2702 else
2703 gcc_unreachable ();
2704 }
2705
2706
2707 /* Given a statement STMT, which is either a PHI node or an assignment,
2708 return the "lhs" of the node. */
2709
2710 static tree
2711 get_lhs_or_phi_result (gimple stmt)
2712 {
2713 if (gimple_code (stmt) == GIMPLE_PHI)
2714 return gimple_phi_result (stmt);
2715 else if (is_gimple_assign (stmt))
2716 return gimple_assign_lhs (stmt);
2717 else
2718 gcc_unreachable ();
2719 }
2720
2721 /* Propagate RHS into all uses of LHS (when possible).
2722
2723 RHS and LHS are derived from STMT, which is passed in solely so
2724 that we can remove it if propagation is successful.
2725
2726 When propagating into a PHI node or into a statement which turns
2727 into a trivial copy or constant initialization, set the
2728 appropriate bit in INTERESTING_NAMEs so that we will visit those
2729 nodes as well in an effort to pick up secondary optimization
2730 opportunities. */
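/* For example (illustrative), once jump threading leaves behind the
   degenerate PHI  x_3 = PHI <y_2 (4)>, we propagate y_2 into every use
   of x_3 and remove the PHI; statements that thereby become trivial
   copies are queued in INTERESTING_NAMES for another round.  */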
2731
2732 static void
2733 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2734 {
2735 /* First verify that propagation is valid. */
2736 if (may_propagate_copy (lhs, rhs))
2737 {
2738 use_operand_p use_p;
2739 imm_use_iterator iter;
2740 gimple use_stmt;
2741 bool all = true;
2742
2743 /* Dump details. */
2744 if (dump_file && (dump_flags & TDF_DETAILS))
2745 {
2746 fprintf (dump_file, " Replacing '");
2747 print_generic_expr (dump_file, lhs, dump_flags);
2748 fprintf (dump_file, "' with %s '",
2749 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2750 print_generic_expr (dump_file, rhs, dump_flags);
2751 fprintf (dump_file, "'\n");
2752 }
2753
2754 /* Walk over every use of LHS and try to replace the use with RHS.
2755 At this point the only reason why such a propagation would not
2756 be successful would be if the use occurs in an ASM_EXPR. */
2757 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2758 {
2759 /* Leave debug stmts alone. If we succeed in propagating
2760 all non-debug uses, we'll drop the DEF, and propagation
2761 into debug stmts will occur then. */
2762 if (gimple_debug_bind_p (use_stmt))
2763 continue;
2764
2765 /* It's not always safe to propagate into an ASM_EXPR. */
2766 if (gimple_code (use_stmt) == GIMPLE_ASM
2767 && ! may_propagate_copy_into_asm (lhs))
2768 {
2769 all = false;
2770 continue;
2771 }
2772
2773 /* It's not ok to propagate into the definition stmt of RHS.
2774 <bb 9>:
2775 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2776 g_67.1_6 = prephitmp.12_36;
2777 goto <bb 9>;
2778 While this is strictly all dead code we do not want to
2779 deal with this here. */
2780 if (TREE_CODE (rhs) == SSA_NAME
2781 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2782 {
2783 all = false;
2784 continue;
2785 }
2786
2787 /* Dump details. */
2788 if (dump_file && (dump_flags & TDF_DETAILS))
2789 {
2790 fprintf (dump_file, " Original statement:");
2791 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2792 }
2793
2794 /* Propagate the RHS into this use of the LHS. */
2795 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2796 propagate_value (use_p, rhs);
2797
2798 /* Special cases to avoid useless calls into the folding
2799 routines, operand scanning, etc.
2800
2801 Propagation into a PHI may cause the PHI to become
2802 a degenerate, so mark the PHI as interesting. No other
2803 actions are necessary. */
2804 if (gimple_code (use_stmt) == GIMPLE_PHI)
2805 {
2806 tree result;
2807
2808 /* Dump details. */
2809 if (dump_file && (dump_flags & TDF_DETAILS))
2810 {
2811 fprintf (dump_file, " Updated statement:");
2812 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2813 }
2814
2815 result = get_lhs_or_phi_result (use_stmt);
2816 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2817 continue;
2818 }
2819
2820 /* From this point onward we are propagating into a
2821 real statement. Folding may (or may not) be possible,
2822 we may expose new operands, expose dead EH edges,
2823 etc. */
2824 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2825 cannot fold a call that simplifies to a constant,
2826 because the GIMPLE_CALL must be replaced by a
2827 GIMPLE_ASSIGN, and there is no way to effect such a
2828 transformation in-place. We might want to consider
2829 using the more general fold_stmt here. */
2830 {
2831 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2832 fold_stmt_inplace (&gsi);
2833 }
2834
2835 /* Sometimes propagation can expose new operands to the
2836 renamer. */
2837 update_stmt (use_stmt);
2838
2839 /* Dump details. */
2840 if (dump_file && (dump_flags & TDF_DETAILS))
2841 {
2842 fprintf (dump_file, " Updated statement:");
2843 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2844 }
2845
2846 /* If we replaced a variable index with a constant, then
2847 we would need to update the invariant flag for ADDR_EXPRs. */
2848 if (gimple_assign_single_p (use_stmt)
2849 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2850 recompute_tree_invariant_for_addr_expr
2851 (gimple_assign_rhs1 (use_stmt));
2852
2853 /* If we cleaned up EH information from the statement,
2854 mark its containing block as needing EH cleanups. */
2855 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2856 {
2857 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2858 if (dump_file && (dump_flags & TDF_DETAILS))
2859 fprintf (dump_file, " Flagged to clear EH edges.\n");
2860 }
2861
2862 /* Propagation may expose new trivial copy/constant propagation
2863 opportunities. */
2864 if (gimple_assign_single_p (use_stmt)
2865 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2866 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2867 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2868 {
2869 tree result = get_lhs_or_phi_result (use_stmt);
2870 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2871 }
2872
2873 /* Propagation into these nodes may make certain edges in
2874 the CFG unexecutable. We want to identify them, as PHI nodes
2875 at the destination of those unexecutable edges may become
2876 degenerate. */
2877 else if (gimple_code (use_stmt) == GIMPLE_COND
2878 || gimple_code (use_stmt) == GIMPLE_SWITCH
2879 || gimple_code (use_stmt) == GIMPLE_GOTO)
2880 {
2881 tree val;
2882
2883 if (gimple_code (use_stmt) == GIMPLE_COND)
2884 val = fold_binary_loc (gimple_location (use_stmt),
2885 gimple_cond_code (use_stmt),
2886 boolean_type_node,
2887 gimple_cond_lhs (use_stmt),
2888 gimple_cond_rhs (use_stmt));
2889 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2890 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2891 else
2892 val = gimple_goto_dest (use_stmt);
2893
2894 if (val && is_gimple_min_invariant (val))
2895 {
2896 basic_block bb = gimple_bb (use_stmt);
2897 edge te = find_taken_edge (bb, val);
2898 if (!te)
2899 continue;
2900
2901 edge_iterator ei;
2902 edge e;
2903 gimple_stmt_iterator gsi;
2904 gphi_iterator psi;
2905
2906 /* Remove all outgoing edges except TE. */
2907 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2908 {
2909 if (e != te)
2910 {
2911 /* Mark all the PHI nodes at the destination of
2912 the unexecutable edge as interesting. */
2913 for (psi = gsi_start_phis (e->dest);
2914 !gsi_end_p (psi);
2915 gsi_next (&psi))
2916 {
2917 gphi *phi = psi.phi ();
2918
2919 tree result = gimple_phi_result (phi);
2920 int version = SSA_NAME_VERSION (result);
2921
2922 bitmap_set_bit (interesting_names, version);
2923 }
2924
2925 te->probability += e->probability;
2926
2927 te->count += e->count;
2928 remove_edge (e);
2929 cfg_altered = true;
2930 }
2931 else
2932 ei_next (&ei);
2933 }
2934
2935 gsi = gsi_last_bb (gimple_bb (use_stmt));
2936 gsi_remove (&gsi, true);
2937
2938 /* And fixup the flags on the single remaining edge. */
2939 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2940 te->flags &= ~EDGE_ABNORMAL;
2941 te->flags |= EDGE_FALLTHRU;
2942 if (te->probability > REG_BR_PROB_BASE)
2943 te->probability = REG_BR_PROB_BASE;
2944 }
2945 }
2946 }
2947
2948 /* Ensure there is nothing else to do. */
2949 gcc_assert (!all || has_zero_uses (lhs));
2950
2951 /* If we were able to propagate away all uses of LHS, then
2952 we can remove STMT. */
2953 if (all)
2954 remove_stmt_or_phi (stmt);
2955 }
2956 }
2957
2958 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2959 a statement that is a trivial copy or constant initialization.
2960
2961 Attempt to eliminate STMT by propagating its RHS into all uses of
2962 its LHS. This may in turn set new bits in INTERESTING_NAMES
2963 for nodes we want to revisit later.
2964
2965 All exit paths should clear INTERESTING_NAMES for the result
2966 of STMT. */
2967
2968 static void
2969 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2970 {
2971 tree lhs = get_lhs_or_phi_result (stmt);
2972 tree rhs;
2973 int version = SSA_NAME_VERSION (lhs);
2974
2975 /* If the LHS of this statement or PHI has no uses, then we can
2976 just eliminate it. This can occur if, for example, the PHI
2977 was created by block duplication due to threading and its only
2978 use was in the conditional at the end of the block which was
2979 deleted. */
2980 if (has_zero_uses (lhs))
2981 {
2982 bitmap_clear_bit (interesting_names, version);
2983 remove_stmt_or_phi (stmt);
2984 return;
2985 }
2986
2987 /* Get the RHS of the assignment or PHI node if the PHI is a
2988 degenerate. */
2989 rhs = get_rhs_or_phi_arg (stmt);
2990 if (!rhs)
2991 {
2992 bitmap_clear_bit (interesting_names, version);
2993 return;
2994 }
2995
2996 if (!virtual_operand_p (lhs))
2997 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2998 else
2999 {
3000 gimple use_stmt;
3001 imm_use_iterator iter;
3002 use_operand_p use_p;
3003 /* For virtual operands we have to propagate into all uses as
3004 otherwise we will create overlapping live ranges. */
3005 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3006 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3007 SET_USE (use_p, rhs);
3008 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3009 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3010 remove_stmt_or_phi (stmt);
3011 }
3012
3013 /* Note that STMT may well have been deleted by now, so do
3014 not access it; instead use the saved version # to clear
3015 STMT's entry in the worklist. */
3016 bitmap_clear_bit (interesting_names, version);
3017 }
3018
3019 /* The first phase in degenerate PHI elimination.
3020
3021 Eliminate the degenerate PHIs in BB, then recurse on the
3022 dominator children of BB. */
3023
3024 static void
3025 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3026 {
3027 gphi_iterator gsi;
3028 basic_block son;
3029
3030 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3031 {
3032 gphi *phi = gsi.phi ();
3033
3034 eliminate_const_or_copy (phi, interesting_names);
3035 }
3036
3037 /* Recurse into the dominator children of BB. */
3038 for (son = first_dom_son (CDI_DOMINATORS, bb);
3039 son;
3040 son = next_dom_son (CDI_DOMINATORS, son))
3041 eliminate_degenerate_phis_1 (son, interesting_names);
3042 }
3043
3044
3045 /* A very simple pass to eliminate degenerate PHI nodes from the
3046 IL. This is meant to be fast enough to be run several times in
3047 the optimization pipeline.
3048
3049 Certain optimizations, particularly those which duplicate blocks
3050 or remove edges from the CFG can create or expose PHIs which are
3051 trivial copies or constant initializations.
3052
3053 While we could pick up these optimizations in DOM or with the
3054 combination of copy-prop and CCP, those solutions are far too
3055 heavy-weight for our needs.
3056
3057 This implementation has two phases so that we can efficiently
3058 eliminate the first order degenerate PHIs and second order
3059 degenerate PHIs.
3060
3061 The first phase performs a dominator walk to identify and eliminate
3062 the vast majority of the degenerate PHIs. When a degenerate PHI
3063 is identified and eliminated any affected statements or PHIs
3064 are put on a worklist.
3065
3066 The second phase eliminates degenerate PHIs and trivial copies
3067 or constant initializations using the worklist. This is how we
3068 pick up the secondary optimization opportunities with minimal
3069 cost. */
3070
3071 namespace {
3072
3073 const pass_data pass_data_phi_only_cprop =
3074 {
3075 GIMPLE_PASS, /* type */
3076 "phicprop", /* name */
3077 OPTGROUP_NONE, /* optinfo_flags */
3078 TV_TREE_PHI_CPROP, /* tv_id */
3079 ( PROP_cfg | PROP_ssa ), /* properties_required */
3080 0, /* properties_provided */
3081 0, /* properties_destroyed */
3082 0, /* todo_flags_start */
3083 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3084 };
3085
3086 class pass_phi_only_cprop : public gimple_opt_pass
3087 {
3088 public:
3089 pass_phi_only_cprop (gcc::context *ctxt)
3090 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3091 {}
3092
3093 /* opt_pass methods: */
3094 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3095 virtual bool gate (function *) { return flag_tree_dom != 0; }
3096 virtual unsigned int execute (function *);
3097
3098 }; // class pass_phi_only_cprop
3099
3100 unsigned int
3101 pass_phi_only_cprop::execute (function *fun)
3102 {
3103 bitmap interesting_names;
3104 bitmap interesting_names1;
3105
3106 /* Bitmap of blocks which need EH information updated. We can not
3107 update it on-the-fly as doing so invalidates the dominator tree. */
3108 need_eh_cleanup = BITMAP_ALLOC (NULL);
3109
3110 /* INTERESTING_NAMES is effectively our worklist, indexed by
3111 SSA_NAME_VERSION.
3112
3113 A set bit indicates that the statement or PHI node which
3114 defines the SSA_NAME should be (re)examined to determine if
3115 it has become a degenerate PHI or trivial const/copy propagation
3116 opportunity.
3117
3118 Experiments have shown we generally get better compilation
3119 time behavior with bitmaps rather than sbitmaps. */
3120 interesting_names = BITMAP_ALLOC (NULL);
3121 interesting_names1 = BITMAP_ALLOC (NULL);
3122
3123 calculate_dominance_info (CDI_DOMINATORS);
3124 cfg_altered = false;
3125
3126 /* First phase. Eliminate degenerate PHIs via a dominator
3127 walk of the CFG.
3128
3129 Experiments have indicated that we generally get better
3130 compile-time behavior by visiting blocks in the first
3131 phase in dominator order. Presumably this is because walking
3132 in dominator order leaves fewer PHIs for later examination
3133 by the worklist phase. */
3134 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3135 interesting_names);
3136
3137 /* Second phase. Eliminate second order degenerate PHIs as well
3138 as trivial copies or constant initializations identified by
3139 the first phase or this phase. Basically we keep iterating
3140 until our set of INTERESTING_NAMEs is empty. */
3141 while (!bitmap_empty_p (interesting_names))
3142 {
3143 unsigned int i;
3144 bitmap_iterator bi;
3145
3146 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3147 changed during the loop. Copy it to another bitmap and
3148 use that. */
3149 bitmap_copy (interesting_names1, interesting_names);
3150
3151 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3152 {
3153 tree name = ssa_name (i);
3154
3155 /* Ignore SSA_NAMEs that have been released because
3156 their defining statement was deleted (unreachable). */
3157 if (name)
3158 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3159 interesting_names);
3160 }
3161 }
3162
3163 if (cfg_altered)
3164 {
3165 free_dominance_info (CDI_DOMINATORS);
3166 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3167 loops_state_set (LOOPS_NEED_FIXUP);
3168 }
3169
3170 /* Propagation of const and copies may make some EH edges dead. Purge
3171 such edges from the CFG as needed. */
3172 if (!bitmap_empty_p (need_eh_cleanup))
3173 {
3174 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3175 BITMAP_FREE (need_eh_cleanup);
3176 }
3177
3178 BITMAP_FREE (interesting_names);
3179 BITMAP_FREE (interesting_names1);
3180 return 0;
3181 }
3182
3183 } // anon namespace
3184
3185 gimple_opt_pass *
3186 make_pass_phi_only_cprop (gcc::context *ctxt)
3187 {
3188 return new pass_phi_only_cprop (ctxt);
3189 }