gcc/tree-ssa-phiopt.c
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "gimple-pretty-print.h"
34 #include "fold-const.h"
35 #include "stor-layout.h"
36 #include "cfganal.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimplify-me.h"
40 #include "tree-cfg.h"
41 #include "tree-dfa.h"
42 #include "domwalk.h"
43 #include "cfgloop.h"
44 #include "tree-data-ref.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-inline.h"
47 #include "params.h"
48
49 static unsigned int tree_ssa_phiopt_worker (bool, bool);
50 static bool conditional_replacement (basic_block, basic_block,
51 edge, edge, gphi *, tree, tree);
52 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree);
53 static int value_replacement (basic_block, basic_block,
54 edge, edge, gimple *, tree, tree);
55 static bool minmax_replacement (basic_block, basic_block,
56 edge, edge, gimple *, tree, tree);
57 static bool abs_replacement (basic_block, basic_block,
58 edge, edge, gimple *, tree, tree);
59 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
60 hash_set<tree> *);
61 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
62 static hash_set<tree> * get_non_trapping ();
63 static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
64 static void hoist_adjacent_loads (basic_block, basic_block,
65 basic_block, basic_block);
66 static bool gate_hoist_loads (void);
67
68 /* This pass tries to transform conditional stores into unconditional
69 ones, enabling further simplifications with the simpler then and else
70 blocks. In particular it replaces this:
71
72 bb0:
73 if (cond) goto bb2; else goto bb1;
74 bb1:
75 *p = RHS;
76 bb2:
77
78 with
79
80 bb0:
81 if (cond) goto bb1; else goto bb2;
82 bb1:
83 condtmp' = *p;
84 bb2:
85 condtmp = PHI <RHS, condtmp'>
86 *p = condtmp;
87
88 This transformation can only be done under several constraints,
89 documented below. It also replaces:
90
91 bb0:
92 if (cond) goto bb2; else goto bb1;
93 bb1:
94 *p = RHS1;
95 goto bb3;
96 bb2:
97 *p = RHS2;
98 bb3:
99
100 with
101
102 bb0:
103 if (cond) goto bb3; else goto bb1;
104 bb1:
105 bb3:
106 condtmp = PHI <RHS1, RHS2>
107 *p = condtmp; */
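/* Illustrative sketch (not part of the original sources): at the C level
   the first transformation corresponds roughly to rewriting

     void
     f (int cond, int *p, int rhs)
     {
       if (cond)
         *p = rhs;
     }

   into

     void
     f (int cond, int *p, int rhs)
     {
       int tmp = cond ? rhs : *p;
       *p = tmp;
     }

   which is only safe when the added load of *p is known not to trap
   (see get_non_trapping below).  The function and parameter names here
   are made up for the example.  */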
108
109 static unsigned int
110 tree_ssa_cs_elim (void)
111 {
112 unsigned todo;
113 /* ??? We are not interested in loop related info, but the following
114 will create it, ICEing as we didn't init loops with pre-headers.
115 An interfacing issue of find_data_references_in_bb. */
116 loop_optimizer_init (LOOPS_NORMAL);
117 scev_initialize ();
118 todo = tree_ssa_phiopt_worker (true, false);
119 scev_finalize ();
120 loop_optimizer_finalize ();
121 return todo;
122 }
123
124 /* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1. */
125
126 static gphi *
127 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
128 {
129 gimple_stmt_iterator i;
130 gphi *phi = NULL;
131 if (gimple_seq_singleton_p (seq))
132 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
133 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
134 {
135 gphi *p = as_a <gphi *> (gsi_stmt (i));
136 /* If the PHI arguments are equal then we can skip this PHI. */
137 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
138 gimple_phi_arg_def (p, e1->dest_idx)))
139 continue;
140
141 /* If we already found a PHI whose two edge arguments differ, then
142 there is no single such PHI; return NULL. */
143 if (phi)
144 return NULL;
145
146 phi = p;
147 }
148 return phi;
149 }
150
151 /* The core routine of conditional store replacement and normal
152 phi optimizations. Both share much of the infrastructure in how
153 to match applicable basic block patterns. DO_STORE_ELIM is true
154 when we want to do conditional store replacement, false otherwise.
155 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
156 of diamond control flow patterns, false otherwise. */
157 static unsigned int
158 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
159 {
160 basic_block bb;
161 basic_block *bb_order;
162 unsigned n, i;
163 bool cfgchanged = false;
164 hash_set<tree> *nontrap = 0;
165
166 if (do_store_elim)
167 /* Calculate the set of non-trapping memory accesses. */
168 nontrap = get_non_trapping ();
169
170 /* Search every basic block for COND_EXPR we may be able to optimize.
171
172 We walk the blocks in an order that guarantees that a block with
173 a single predecessor is processed before the predecessor.
174 This ensures that we collapse inner ifs before visiting the
175 outer ones, and also that we do not try to visit a removed
176 block. */
177 bb_order = single_pred_before_succ_order ();
178 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
179
180 for (i = 0; i < n; i++)
181 {
182 gimple *cond_stmt;
183 gphi *phi;
184 basic_block bb1, bb2;
185 edge e1, e2;
186 tree arg0, arg1;
187
188 bb = bb_order[i];
189
190 cond_stmt = last_stmt (bb);
191 /* Check to see if the last statement is a GIMPLE_COND. */
192 if (!cond_stmt
193 || gimple_code (cond_stmt) != GIMPLE_COND)
194 continue;
195
196 e1 = EDGE_SUCC (bb, 0);
197 bb1 = e1->dest;
198 e2 = EDGE_SUCC (bb, 1);
199 bb2 = e2->dest;
200
201 /* We cannot do the optimization on abnormal edges. */
202 if ((e1->flags & EDGE_ABNORMAL) != 0
203 || (e2->flags & EDGE_ABNORMAL) != 0)
204 continue;
205
206 /* Give up if bb1 has no successors, bb2 is missing, or bb2 has no successors. */
207 if (EDGE_COUNT (bb1->succs) == 0
208 || bb2 == NULL
209 || EDGE_COUNT (bb2->succs) == 0)
210 continue;
211
212 /* Find the bb which is the fall through to the other. */
213 if (EDGE_SUCC (bb1, 0)->dest == bb2)
214 ;
215 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
216 {
217 std::swap (bb1, bb2);
218 std::swap (e1, e2);
219 }
220 else if (do_store_elim
221 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
222 {
223 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
224
225 if (!single_succ_p (bb1)
226 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
227 || !single_succ_p (bb2)
228 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
229 || EDGE_COUNT (bb3->preds) != 2)
230 continue;
231 if (cond_if_else_store_replacement (bb1, bb2, bb3))
232 cfgchanged = true;
233 continue;
234 }
235 else if (do_hoist_loads
236 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
237 {
238 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
239
240 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
241 && single_succ_p (bb1)
242 && single_succ_p (bb2)
243 && single_pred_p (bb1)
244 && single_pred_p (bb2)
245 && EDGE_COUNT (bb->succs) == 2
246 && EDGE_COUNT (bb3->preds) == 2
247 /* If one edge or the other is dominant, a conditional move
248 is likely to perform worse than the well-predicted branch. */
249 && !predictable_edge_p (EDGE_SUCC (bb, 0))
250 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
251 hoist_adjacent_loads (bb, bb1, bb2, bb3);
252 continue;
253 }
254 else
255 continue;
256
257 e1 = EDGE_SUCC (bb1, 0);
258
259 /* Make sure that bb1 is just a fall through. */
260 if (!single_succ_p (bb1)
261 || (e1->flags & EDGE_FALLTHRU) == 0)
262 continue;
263
264 /* Also make sure that bb1 has only one predecessor and that it
265 is bb. */
266 if (!single_pred_p (bb1)
267 || single_pred (bb1) != bb)
268 continue;
269
270 if (do_store_elim)
271 {
272 /* bb1 is the middle block, bb2 the join block, bb the split block,
273 e1 the fallthrough edge from bb1 to bb2. We can't do the
274 optimization if the join block has more than two predecessors. */
275 if (EDGE_COUNT (bb2->preds) > 2)
276 continue;
277 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
278 cfgchanged = true;
279 }
280 else
281 {
282 gimple_seq phis = phi_nodes (bb2);
283 gimple_stmt_iterator gsi;
284 bool candorest = true;
285
286 /* Value replacement can work with more than one PHI
287 so try that first. */
288 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
289 {
290 phi = as_a <gphi *> (gsi_stmt (gsi));
291 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
292 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
293 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
294 {
295 candorest = false;
296 cfgchanged = true;
297 break;
298 }
299 }
300
301 if (!candorest)
302 continue;
303
304 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
305 if (!phi)
306 continue;
307
308 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
309 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
310
311 /* Something is wrong if we cannot find the arguments in the PHI
312 node. */
313 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
314
315 gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
316 arg0, arg1);
317 if (newphi != NULL)
318 {
319 phi = newphi;
320 /* factor_out_conditional_conversion may create a new PHI in
321 BB2 and eliminate an existing PHI in BB2. Recompute values
322 that may be affected by that change. */
323 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
324 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
325 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
326 }
327
328 /* Do the replacement of conditional if it can be done. */
329 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
330 cfgchanged = true;
331 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
332 cfgchanged = true;
333 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
334 cfgchanged = true;
335 }
336 }
337
338 free (bb_order);
339
340 if (do_store_elim)
341 delete nontrap;
342 /* If the CFG has changed, we should cleanup the CFG. */
343 if (cfgchanged && do_store_elim)
344 {
345 /* In cond-store replacement we have added some loads on edges
346 and new VOPS (as we moved the store, and created a load). */
347 gsi_commit_edge_inserts ();
348 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
349 }
350 else if (cfgchanged)
351 return TODO_cleanup_cfg;
352 return 0;
353 }
354
355 /* Replace PHI node element whose edge is E in block BB with variable NEW.
356 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
357 is known to have two edges, one of which must reach BB). */
358
359 static void
360 replace_phi_edge_with_variable (basic_block cond_block,
361 edge e, gimple *phi, tree new_tree)
362 {
363 basic_block bb = gimple_bb (phi);
364 basic_block block_to_remove;
365 gimple_stmt_iterator gsi;
366
367 /* Change the PHI argument to new. */
368 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
369
370 /* Remove the empty basic block. */
371 if (EDGE_SUCC (cond_block, 0)->dest == bb)
372 {
373 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
374 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
375 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
376 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
377
378 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
379 }
380 else
381 {
382 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
383 EDGE_SUCC (cond_block, 1)->flags
384 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
385 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
386 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
387
388 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
389 }
390 delete_basic_block (block_to_remove);
391
392 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
393 gsi = gsi_last_bb (cond_block);
394 gsi_remove (&gsi, true);
395
396 if (dump_file && (dump_flags & TDF_DETAILS))
397 fprintf (dump_file,
398 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
399 cond_block->index,
400 bb->index);
401 }
402
403 /* PR66726: Factor conversion out of COND_EXPR. If the arguments of the PHI
404 stmt are conversions, factor the conversion out of the PHI and apply it
405 to the result of the PHI stmt instead. Return the newly-created PHI, if any. */
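/* Illustrative sketch (hypothetical example, not taken from PR66726
   itself): factoring the conversion out of the PHI corresponds at the
   source level to rewriting

     long
     f (int c, int a, int b)
     {
       return c ? (long) a : (long) b;
     }

   into

     long
     f (int c, int a, int b)
     {
       int t = c ? a : b;
       return (long) t;
     }

   so that only one conversion remains, applied to the PHI result.  */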
406
407 static gphi *
408 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
409 tree arg0, tree arg1)
410 {
411 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
412 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
413 tree temp, result;
414 gphi *newphi;
415 gimple_stmt_iterator gsi, gsi_for_def;
416 source_location locus = gimple_location (phi);
417 enum tree_code convert_code;
418
419 /* Handle only PHI statements with two arguments. TODO: If all
420 other arguments to PHI are INTEGER_CST or if their defining
421 statements have the same unary operation, we can handle more
422 than two arguments too. */
423 if (gimple_phi_num_args (phi) != 2)
424 return NULL;
425
426 /* First canonicalize to simplify tests. */
427 if (TREE_CODE (arg0) != SSA_NAME)
428 {
429 std::swap (arg0, arg1);
430 std::swap (e0, e1);
431 }
432
433 if (TREE_CODE (arg0) != SSA_NAME
434 || (TREE_CODE (arg1) != SSA_NAME
435 && TREE_CODE (arg1) != INTEGER_CST))
436 return NULL;
437
438 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
439 a conversion. */
440 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
441 if (!gimple_assign_cast_p (arg0_def_stmt))
442 return NULL;
443
444 /* Use the RHS as new_arg0. */
445 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
446 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
447 if (convert_code == VIEW_CONVERT_EXPR)
448 {
449 new_arg0 = TREE_OPERAND (new_arg0, 0);
450 if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
451 return NULL;
452 }
453
454 if (TREE_CODE (arg1) == SSA_NAME)
455 {
456 /* arg1 is an SSA_NAME; check that the stmt which defines it
457 is the same kind of conversion. */
458 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
459 if (!is_gimple_assign (arg1_def_stmt)
460 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
461 return NULL;
462
463 /* Use the RHS as new_arg1. */
464 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
465 if (convert_code == VIEW_CONVERT_EXPR)
466 new_arg1 = TREE_OPERAND (new_arg1, 0);
467 }
468 else
469 {
470 /* If arg1 is an INTEGER_CST, fold it to new type. */
471 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
472 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
473 {
474 if (gimple_assign_cast_p (arg0_def_stmt))
475 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
476 else
477 return NULL;
478 }
479 else
480 return NULL;
481 }
482
483 /* If arg0/arg1 have > 1 use, then this transformation actually increases
484 the number of expressions evaluated at runtime. */
485 if (!has_single_use (arg0)
486 || (arg1_def_stmt && !has_single_use (arg1)))
487 return NULL;
488
489 /* If the types of new_arg0 and new_arg1 are different, bail out. */
490 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
491 return NULL;
492
493 /* Create a new PHI stmt. */
494 result = PHI_RESULT (phi);
495 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
496 newphi = create_phi_node (temp, gimple_bb (phi));
497
498 if (dump_file && (dump_flags & TDF_DETAILS))
499 {
500 fprintf (dump_file, "PHI ");
501 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
502 fprintf (dump_file,
503 " changed to factor conversion out from COND_EXPR.\n");
504 fprintf (dump_file, "New stmt with CAST that defines ");
505 print_generic_expr (dump_file, result, 0);
506 fprintf (dump_file, ".\n");
507 }
508
509 /* Remove the old cast(s) that have a single use. */
510 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
511 gsi_remove (&gsi_for_def, true);
512 release_defs (arg0_def_stmt);
513
514 if (arg1_def_stmt)
515 {
516 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
517 gsi_remove (&gsi_for_def, true);
518 release_defs (arg1_def_stmt);
519 }
520
521 add_phi_arg (newphi, new_arg0, e0, locus);
522 add_phi_arg (newphi, new_arg1, e1, locus);
523
524 /* Create the conversion stmt and insert it. */
525 if (convert_code == VIEW_CONVERT_EXPR)
526 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
527 new_stmt = gimple_build_assign (result, convert_code, temp);
528 gsi = gsi_after_labels (gimple_bb (phi));
529 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
530
531 /* Remove the original PHI stmt. */
532 gsi = gsi_for_stmt (phi);
533 gsi_remove (&gsi, true);
534 return newphi;
535 }
536
537 /* The function conditional_replacement does the main work of the
538 conditional replacement. Return true if the replacement is done.
539 Otherwise return false.
540 BB is the basic block in which the replacement is going to be done. ARG0
541 is argument 0 from the PHI. Likewise for ARG1. */
542
543 static bool
544 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
545 edge e0, edge e1, gphi *phi,
546 tree arg0, tree arg1)
547 {
548 tree result;
549 gimple *stmt;
550 gassign *new_stmt;
551 tree cond;
552 gimple_stmt_iterator gsi;
553 edge true_edge, false_edge;
554 tree new_var, new_var2;
555 bool neg;
556
557 /* FIXME: Gimplification of complex type is too hard for now. */
558 /* We aren't prepared to handle vectors either (and it is a question
559 if it would be worthwhile anyway). */
560 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
561 || POINTER_TYPE_P (TREE_TYPE (arg0)))
562 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
563 || POINTER_TYPE_P (TREE_TYPE (arg1))))
564 return false;
565
566 /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
567 convert the PHI into a use of the condition. */
568 if ((integer_zerop (arg0) && integer_onep (arg1))
569 || (integer_zerop (arg1) && integer_onep (arg0)))
570 neg = false;
571 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
572 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
573 neg = true;
574 else
575 return false;
576
577 if (!empty_block_p (middle_bb))
578 return false;
579
580 /* At this point we know we have a GIMPLE_COND with two successors.
581 One successor is BB, the other successor is an empty block which
582 falls through into BB.
583
584 There is a single PHI node at the join point (BB) and its arguments
585 are constants (0, 1) or (0, -1).
586
587 So, given the condition COND, and the two PHI arguments, we can
588 rewrite this PHI into non-branching code:
589
590 dest = (COND) or dest = COND'
591
592 We use the condition as-is if the argument associated with the
593 true edge has the value one or the argument associated with the
594 false edge has the value zero. Note that those conditions are not
595 the same since only one of the outgoing edges from the GIMPLE_COND
596 will directly reach BB and thus be associated with an argument. */
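/* Illustrative sketch (examples made up for exposition): at the source
   level the rewrite described above corresponds to

     r = (a < b) ? 1 : 0;      -->   r = a < b;
     r = (a < b) ? 0 : 1;      -->   r = !(a < b);
     r = (a < b) ? -1 : 0;     -->   r = -(int) (a < b);

   i.e. the PHI of the two constants becomes a (possibly inverted
   and/or negated) use of the condition itself.  */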
597
598 stmt = last_stmt (cond_bb);
599 result = PHI_RESULT (phi);
600
601 /* To handle special cases like floating point comparison, it is easier and
602 less error-prone to build a tree and gimplify it on the fly though it is
603 less efficient. */
604 cond = fold_build2_loc (gimple_location (stmt),
605 gimple_cond_code (stmt), boolean_type_node,
606 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
607
608 /* We need to know which is the true edge and which is the false
609 edge so that we know when to invert the condition below. */
610 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
611 if ((e0 == true_edge && integer_zerop (arg0))
612 || (e0 == false_edge && !integer_zerop (arg0))
613 || (e1 == true_edge && integer_zerop (arg1))
614 || (e1 == false_edge && !integer_zerop (arg1)))
615 cond = fold_build1_loc (gimple_location (stmt),
616 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
617
618 if (neg)
619 {
620 cond = fold_convert_loc (gimple_location (stmt),
621 TREE_TYPE (result), cond);
622 cond = fold_build1_loc (gimple_location (stmt),
623 NEGATE_EXPR, TREE_TYPE (cond), cond);
624 }
625
626 /* Insert our new statements at the end of conditional block before the
627 COND_STMT. */
628 gsi = gsi_for_stmt (stmt);
629 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
630 GSI_SAME_STMT);
631
632 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
633 {
634 source_location locus_0, locus_1;
635
636 new_var2 = make_ssa_name (TREE_TYPE (result));
637 new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
638 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
639 new_var = new_var2;
640
641 /* Set the locus to the first argument, unless it doesn't have one. */
642 locus_0 = gimple_phi_arg_location (phi, 0);
643 locus_1 = gimple_phi_arg_location (phi, 1);
644 if (locus_0 == UNKNOWN_LOCATION)
645 locus_0 = locus_1;
646 gimple_set_location (new_stmt, locus_0);
647 }
648
649 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
650 reset_flow_sensitive_info_in_bb (cond_bb);
651
652 /* Note that we optimized this PHI. */
653 return true;
654 }
655
656 /* Update *ARG which is defined in STMT so that it contains the
657 computed value if that seems profitable. Return true if the
658 statement is made dead by that rewriting. */
659
660 static bool
661 jump_function_from_stmt (tree *arg, gimple *stmt)
662 {
663 enum tree_code code = gimple_assign_rhs_code (stmt);
664 if (code == ADDR_EXPR)
665 {
666 /* For arg = &p->i transform it to p, if possible. */
667 tree rhs1 = gimple_assign_rhs1 (stmt);
668 HOST_WIDE_INT offset;
669 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
670 &offset);
671 if (tem
672 && TREE_CODE (tem) == MEM_REF
673 && (mem_ref_offset (tem) + offset) == 0)
674 {
675 *arg = TREE_OPERAND (tem, 0);
676 return true;
677 }
678 }
679 /* TODO: Much like IPA-CP jump-functions we want to handle constant
680 additions symbolically here, and we'd need to update the comparison
681 code that compares the arg + cst tuples in our caller. For now the
682 code above exactly handles the VEC_BASE pattern from vec.h. */
683 return false;
684 }
685
686 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
687 of the form SSA_NAME NE 0.
688
689 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
690 the two input values of the EQ_EXPR match arg0 and arg1.
691
692 If so update *code and return TRUE. Otherwise return FALSE. */
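/* Illustrative sketch (hypothetical GIMPLE, for exposition only): the
   pattern handled here looks like

     t1_1 = a_2 == b_3;        <- EQ_EXPR feeding ...
     t2_4 = t1_1 & c_5;        <- ... a BIT_AND_EXPR ...
     if (t2_4 != 0)            <- ... which feeds the condition

   where a_2 and b_3 must match the PHI arguments arg0/arg1 handed in
   by value_replacement.  The SSA names here are made up.  */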
693
694 static bool
695 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
696 enum tree_code *code, const_tree rhs)
697 {
698 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
699 statement. */
700 if (TREE_CODE (rhs) == SSA_NAME)
701 {
702 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
703
704 /* Verify the defining statement has an EQ_EXPR on the RHS. */
705 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
706 {
707 /* Finally verify the source operands of the EQ_EXPR are equal
708 to arg0 and arg1. */
709 tree op0 = gimple_assign_rhs1 (def1);
710 tree op1 = gimple_assign_rhs2 (def1);
711 if ((operand_equal_for_phi_arg_p (arg0, op0)
712 && operand_equal_for_phi_arg_p (arg1, op1))
713 || (operand_equal_for_phi_arg_p (arg0, op1)
714 && operand_equal_for_phi_arg_p (arg1, op0)))
715 {
716 /* We will perform the optimization. */
717 *code = gimple_assign_rhs_code (def1);
718 return true;
719 }
720 }
721 }
722 return false;
723 }
724
725 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
726
727 Also return TRUE if arg0/arg1 are equal to the source arguments of
728 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
729
730 Return FALSE otherwise. */
731
732 static bool
733 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
734 enum tree_code *code, gimple *cond)
735 {
736 gimple *def;
737 tree lhs = gimple_cond_lhs (cond);
738 tree rhs = gimple_cond_rhs (cond);
739
740 if ((operand_equal_for_phi_arg_p (arg0, lhs)
741 && operand_equal_for_phi_arg_p (arg1, rhs))
742 || (operand_equal_for_phi_arg_p (arg1, lhs)
743 && operand_equal_for_phi_arg_p (arg0, rhs)))
744 return true;
745
746 /* Now handle more complex case where we have an EQ comparison
747 which feeds a BIT_AND_EXPR which feeds COND.
748
749 First verify that COND is of the form SSA_NAME NE 0. */
750 if (*code != NE_EXPR || !integer_zerop (rhs)
751 || TREE_CODE (lhs) != SSA_NAME)
752 return false;
753
754 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
755 def = SSA_NAME_DEF_STMT (lhs);
756 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
757 return false;
758
759 /* Now verify arg0/arg1 correspond to the source arguments of an
760 EQ comparison feeding the BIT_AND_EXPR. */
761
762 tree tmp = gimple_assign_rhs1 (def);
763 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
764 return true;
765
766 tmp = gimple_assign_rhs2 (def);
767 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
768 return true;
769
770 return false;
771 }
772
773 /* Returns true if ARG is a neutral element for operation CODE
774 on the RIGHT side. */
775
776 static bool
777 neutral_element_p (tree_code code, tree arg, bool right)
778 {
779 switch (code)
780 {
781 case PLUS_EXPR:
782 case BIT_IOR_EXPR:
783 case BIT_XOR_EXPR:
784 return integer_zerop (arg);
785
786 case LROTATE_EXPR:
787 case RROTATE_EXPR:
788 case LSHIFT_EXPR:
789 case RSHIFT_EXPR:
790 case MINUS_EXPR:
791 case POINTER_PLUS_EXPR:
792 return right && integer_zerop (arg);
793
794 case MULT_EXPR:
795 return integer_onep (arg);
796
797 case TRUNC_DIV_EXPR:
798 case CEIL_DIV_EXPR:
799 case FLOOR_DIV_EXPR:
800 case ROUND_DIV_EXPR:
801 case EXACT_DIV_EXPR:
802 return right && integer_onep (arg);
803
804 case BIT_AND_EXPR:
805 return integer_all_onesp (arg);
806
807 default:
808 return false;
809 }
810 }
811
812 /* Returns true if ARG is an absorbing element for operation CODE. */
813
814 static bool
815 absorbing_element_p (tree_code code, tree arg)
816 {
817 switch (code)
818 {
819 case BIT_IOR_EXPR:
820 return integer_all_onesp (arg);
821
822 case MULT_EXPR:
823 case BIT_AND_EXPR:
824 return integer_zerop (arg);
825
826 default:
827 return false;
828 }
829 }
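/* Illustrative examples (not part of the original sources) of how the
   two predicates above feed value_replacement below:

     x + 0 == x, x * 1 == x, x & -1 == x    neutral on either side
     x - 0 == x, x / 1 == x, x >> 0 == x    neutral on the right only
     x * 0 == 0, x & 0 == 0, x | -1 == -1   absorbing

   so e.g. (x != 0) ? x + y : y always equals x + y, and
   (x != 0) ? x * y : 0 always equals x * y.  */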
830
831 /* The function value_replacement does the main work of the value
832 replacement. Return non-zero if the replacement is done. Otherwise return
833 0. If we remove the middle basic block, return 2.
834 BB is the basic block in which the replacement is going to be done. ARG0
835 is argument 0 from the PHI. Likewise for ARG1. */
836
837 static int
838 value_replacement (basic_block cond_bb, basic_block middle_bb,
839 edge e0, edge e1, gimple *phi,
840 tree arg0, tree arg1)
841 {
842 gimple_stmt_iterator gsi;
843 gimple *cond;
844 edge true_edge, false_edge;
845 enum tree_code code;
846 bool empty_or_with_defined_p = true;
847
848 /* If the type says honor signed zeros we cannot do this
849 optimization. */
850 if (HONOR_SIGNED_ZEROS (arg1))
851 return 0;
852
853 /* If there is a statement in MIDDLE_BB that defines one of the PHI
854 arguments, then adjust arg0 or arg1. */
855 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
856 while (!gsi_end_p (gsi))
857 {
858 gimple *stmt = gsi_stmt (gsi);
859 tree lhs;
860 gsi_next_nondebug (&gsi);
861 if (!is_gimple_assign (stmt))
862 {
863 empty_or_with_defined_p = false;
864 continue;
865 }
866 /* Now try to adjust arg0 or arg1 according to the computation
867 in the statement. */
868 lhs = gimple_assign_lhs (stmt);
869 if (!(lhs == arg0
870 && jump_function_from_stmt (&arg0, stmt))
871 || (lhs == arg1
872 && jump_function_from_stmt (&arg1, stmt)))
873 empty_or_with_defined_p = false;
874 }
875
876 cond = last_stmt (cond_bb);
877 code = gimple_cond_code (cond);
878
879 /* This transformation is only valid for equality comparisons. */
880 if (code != NE_EXPR && code != EQ_EXPR)
881 return 0;
882
883 /* We need to know which is the true edge and which is the false
884 edge so that we know which PHI argument goes with which edge. */
885 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
886
887 /* At this point we know we have a COND_EXPR with two successors.
888 One successor is BB, the other successor is an empty block which
889 falls through into BB.
890
891 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
892
893 There is a single PHI node at the join point (BB) with two arguments.
894
895 We now need to verify that the two arguments in the PHI node match
896 the two arguments to the equality comparison. */
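/* Illustrative sketch (made-up example): the equality case handled
   here corresponds to source like

     int
     f (int a, int b)
     {
       return (a == b) ? a : b;
     }

   which can be reduced to simply returning b, because on the edge
   where a == b holds the two values are interchangeable.  */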
897
898 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
899 {
900 edge e;
901 tree arg;
902
903 /* For NE_EXPR, we want to build an assignment result = arg where
904 arg is the PHI argument associated with the true edge. For
905 EQ_EXPR we want the PHI argument associated with the false edge. */
906 e = (code == NE_EXPR ? true_edge : false_edge);
907
908 /* Unfortunately, E may not reach BB (it may instead have gone to
909 OTHER_BLOCK). If that is the case, then we want the single outgoing
910 edge from OTHER_BLOCK which reaches BB and represents the desired
911 path from COND_BLOCK. */
912 if (e->dest == middle_bb)
913 e = single_succ_edge (e->dest);
914
915 /* Now we know the incoming edge to BB that has the argument for the
916 RHS of our new assignment statement. */
917 if (e0 == e)
918 arg = arg0;
919 else
920 arg = arg1;
921
922 /* If the middle basic block was empty or only defines the
923 PHI arguments, and this is the single PHI whose arguments differ
924 for the edges e0 and e1, then we can remove the middle basic block. */
925 if (empty_or_with_defined_p
926 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
927 e0, e1) == phi)
928 {
929 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
930 /* Note that we optimized this PHI. */
931 return 2;
932 }
933 else
934 {
935 /* Replace the PHI arguments with arg. */
936 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
937 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
938 if (dump_file && (dump_flags & TDF_DETAILS))
939 {
940 fprintf (dump_file, "PHI ");
941 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
942 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
943 cond_bb->index);
944 print_generic_expr (dump_file, arg, 0);
945 fprintf (dump_file, ".\n");
946 }
947 return 1;
948 }
949
950 }
951
952 /* Now optimize (x != 0) ? x + y : y to just x + y.
953 The following condition is too restrictive, there can easily be another
954 stmt in middle_bb, for instance a CONVERT_EXPR for the second argument. */
955 gimple *assign = last_and_only_stmt (middle_bb);
956 if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
957 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
958 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
959 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
960 return 0;
961
962 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
963 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
964 return 0;
965
966 /* Only transform if it removes the condition. */
967 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
968 return 0;
969
970 /* Size-wise, this is always profitable. */
971 if (optimize_bb_for_speed_p (cond_bb)
972 /* The special case is useless if it has a low probability. */
973 && profile_status_for_fn (cfun) != PROFILE_ABSENT
974 && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
975 /* If assign is cheap, there is no point avoiding it. */
976 && estimate_num_insns (assign, &eni_time_weights)
977 >= 3 * estimate_num_insns (cond, &eni_time_weights))
978 return 0;
979
980 tree lhs = gimple_assign_lhs (assign);
981 tree rhs1 = gimple_assign_rhs1 (assign);
982 tree rhs2 = gimple_assign_rhs2 (assign);
983 enum tree_code code_def = gimple_assign_rhs_code (assign);
984 tree cond_lhs = gimple_cond_lhs (cond);
985 tree cond_rhs = gimple_cond_rhs (cond);
986
987 if (((code == NE_EXPR && e1 == false_edge)
988 || (code == EQ_EXPR && e1 == true_edge))
989 && arg0 == lhs
990 && ((arg1 == rhs1
991 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
992 && neutral_element_p (code_def, cond_rhs, true))
993 || (arg1 == rhs2
994 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
995 && neutral_element_p (code_def, cond_rhs, false))
996 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
997 && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
998 || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
999 && absorbing_element_p (code_def, cond_rhs))))
1000 {
1001 gsi = gsi_for_stmt (cond);
1002 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
1003 {
1004 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1005 def-stmt in:
1006 if (n_5 != 0)
1007 goto <bb 3>;
1008 else
1009 goto <bb 4>;
1010
1011 <bb 3>:
1012 # RANGE [0, 4294967294]
1013 u_6 = n_5 + 4294967295;
1014
1015 <bb 4>:
1016 # u_3 = PHI <u_6(3), 4294967295(2)> */
1017 SSA_NAME_RANGE_INFO (lhs) = NULL;
1018 /* If available, we can use VR of phi result at least. */
1019 tree phires = gimple_phi_result (phi);
1020 struct range_info_def *phires_range_info
1021 = SSA_NAME_RANGE_INFO (phires);
1022 if (phires_range_info)
1023 duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
1024 phires_range_info);
1025 }
1026 gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
1027 gsi_move_before (&gsi_from, &gsi);
1028 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1029 return 2;
1030 }
1031
1032 return 0;
1033 }
1034
1035 /* The function minmax_replacement does the main work of the minmax
1036 replacement. Return true if the replacement is done. Otherwise return
1037 false.
1038 BB is the basic block in which the replacement is going to be done. ARG0
1039 is argument 0 from the PHI. Likewise for ARG1. */
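/* Illustrative sketch (examples made up for exposition): the simplest
   cases handled below are

     x = (a < b) ? a : b;     -->   x = MIN_EXPR <a, b>;
     x = (a > b) ? a : b;     -->   x = MAX_EXPR <a, b>;

   with an empty middle block; the non-empty case additionally allows a
   single MIN_EXPR/MAX_EXPR in the middle block, as in the clamping
   example given further down.  */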
1040
1041 static bool
1042 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1043 edge e0, edge e1, gimple *phi,
1044 tree arg0, tree arg1)
1045 {
1046 tree result, type;
1047 gcond *cond;
1048 gassign *new_stmt;
1049 edge true_edge, false_edge;
1050 enum tree_code cmp, minmax, ass_code;
1051 tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
1052 gimple_stmt_iterator gsi, gsi_from;
1053
1054 type = TREE_TYPE (PHI_RESULT (phi));
1055
1056 /* The optimization may be unsafe due to NaNs. */
1057 if (HONOR_NANS (type))
1058 return false;
1059
1060 cond = as_a <gcond *> (last_stmt (cond_bb));
1061 cmp = gimple_cond_code (cond);
1062
1063 /* This transformation is only valid for order comparisons. Record which
1064 operand is smaller/larger if the result of the comparison is true. */
1065 alt_smaller = NULL_TREE;
1066 alt_larger = NULL_TREE;
1067 if (cmp == LT_EXPR || cmp == LE_EXPR)
1068 {
1069 smaller = gimple_cond_lhs (cond);
1070 larger = gimple_cond_rhs (cond);
1071 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1072 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1073 if (TREE_CODE (larger) == INTEGER_CST)
1074 {
1075 if (cmp == LT_EXPR)
1076 {
1077 bool overflow;
1078 wide_int alt = wi::sub (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
1079 &overflow);
1080 if (! overflow)
1081 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1082 }
1083 else
1084 {
1085 bool overflow;
1086 wide_int alt = wi::add (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
1087 &overflow);
1088 if (! overflow)
1089 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1090 }
1091 }
1092 }
1093 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1094 {
1095 smaller = gimple_cond_rhs (cond);
1096 larger = gimple_cond_lhs (cond);
1097 /* If we have larger > CST it is equivalent to larger >= CST+1.
1098 Likewise larger >= CST is equivalent to larger > CST-1. */
1099 if (TREE_CODE (smaller) == INTEGER_CST)
1100 {
1101 if (cmp == GT_EXPR)
1102 {
1103 bool overflow;
1104 wide_int alt = wi::add (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
1105 &overflow);
1106 if (! overflow)
1107 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1108 }
1109 else
1110 {
1111 bool overflow;
1112 wide_int alt = wi::sub (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
1113 &overflow);
1114 if (! overflow)
1115 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1116 }
1117 }
1118 }
1119 else
1120 return false;
1121
1122 /* We need to know which is the true edge and which is the false
1123 edge so that we know which PHI argument is selected on each edge. */
1124 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1125
1126 /* Forward the edges over the middle basic block. */
1127 if (true_edge->dest == middle_bb)
1128 true_edge = EDGE_SUCC (true_edge->dest, 0);
1129 if (false_edge->dest == middle_bb)
1130 false_edge = EDGE_SUCC (false_edge->dest, 0);
1131
1132 if (true_edge == e0)
1133 {
1134 gcc_assert (false_edge == e1);
1135 arg_true = arg0;
1136 arg_false = arg1;
1137 }
1138 else
1139 {
1140 gcc_assert (false_edge == e0);
1141 gcc_assert (true_edge == e1);
1142 arg_true = arg1;
1143 arg_false = arg0;
1144 }
1145
1146 if (empty_block_p (middle_bb))
1147 {
1148 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1149 || (alt_smaller
1150 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1151 && (operand_equal_for_phi_arg_p (arg_false, larger)
1152 || (alt_larger
1153 && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
1154 {
1155 /* Case
1156
1157 if (smaller < larger)
1158 rslt = smaller;
1159 else
1160 rslt = larger; */
1161 minmax = MIN_EXPR;
1162 }
1163 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1164 || (alt_smaller
1165 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1166 && (operand_equal_for_phi_arg_p (arg_true, larger)
1167 || (alt_larger
1168 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1169 minmax = MAX_EXPR;
1170 else
1171 return false;
1172 }
1173 else
1174 {
1175 /* Recognize the following case, assuming d <= u:
1176
1177 if (a <= u)
1178 b = MAX (a, d);
1179 x = PHI <b, u>
1180
1181 This is equivalent to
1182
1183 b = MAX (a, d);
1184 x = MIN (b, u); */
1185
1186 gimple *assign = last_and_only_stmt (middle_bb);
1187 tree lhs, op0, op1, bound;
1188
1189 if (!assign
1190 || gimple_code (assign) != GIMPLE_ASSIGN)
1191 return false;
1192
1193 lhs = gimple_assign_lhs (assign);
1194 ass_code = gimple_assign_rhs_code (assign);
1195 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1196 return false;
1197 op0 = gimple_assign_rhs1 (assign);
1198 op1 = gimple_assign_rhs2 (assign);
1199
1200 if (true_edge->src == middle_bb)
1201 {
1202 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1203 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1204 return false;
1205
1206 if (operand_equal_for_phi_arg_p (arg_false, larger)
1207 || (alt_larger
1208 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
1209 {
1210 /* Case
1211
1212 if (smaller < larger)
1213 {
1214 r' = MAX_EXPR (smaller, bound)
1215 }
1216 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1217 if (ass_code != MAX_EXPR)
1218 return false;
1219
1220 minmax = MIN_EXPR;
1221 if (operand_equal_for_phi_arg_p (op0, smaller)
1222 || (alt_smaller
1223 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1224 bound = op1;
1225 else if (operand_equal_for_phi_arg_p (op1, smaller)
1226 || (alt_smaller
1227 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1228 bound = op0;
1229 else
1230 return false;
1231
1232 /* We need BOUND <= LARGER. */
1233 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1234 bound, larger)))
1235 return false;
1236 }
1237 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1238 || (alt_smaller
1239 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1240 {
1241 /* Case
1242
1243 if (smaller < larger)
1244 {
1245 r' = MIN_EXPR (larger, bound)
1246 }
1247 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1248 if (ass_code != MIN_EXPR)
1249 return false;
1250
1251 minmax = MAX_EXPR;
1252 if (operand_equal_for_phi_arg_p (op0, larger)
1253 || (alt_larger
1254 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1255 bound = op1;
1256 else if (operand_equal_for_phi_arg_p (op1, larger)
1257 || (alt_larger
1258 && operand_equal_for_phi_arg_p (op1, alt_larger)))
1259 bound = op0;
1260 else
1261 return false;
1262
1263 /* We need BOUND >= SMALLER. */
1264 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1265 bound, smaller)))
1266 return false;
1267 }
1268 else
1269 return false;
1270 }
1271 else
1272 {
1273 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1274 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1275 return false;
1276
1277 if (operand_equal_for_phi_arg_p (arg_true, larger)
1278 || (alt_larger
1279 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
1280 {
1281 /* Case
1282
1283 if (smaller > larger)
1284 {
1285 r' = MIN_EXPR (smaller, bound)
1286 }
1287 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1288 if (ass_code != MIN_EXPR)
1289 return false;
1290
1291 minmax = MAX_EXPR;
1292 if (operand_equal_for_phi_arg_p (op0, smaller)
1293 || (alt_smaller
1294 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1295 bound = op1;
1296 else if (operand_equal_for_phi_arg_p (op1, smaller)
1297 || (alt_smaller
1298 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1299 bound = op0;
1300 else
1301 return false;
1302
1303 /* We need BOUND >= LARGER. */
1304 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1305 bound, larger)))
1306 return false;
1307 }
1308 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
1309 || (alt_smaller
1310 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1311 {
1312 /* Case
1313
1314 if (smaller > larger)
1315 {
1316 r' = MAX_EXPR (larger, bound)
1317 }
1318 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1319 if (ass_code != MAX_EXPR)
1320 return false;
1321
1322 minmax = MIN_EXPR;
1323 if (operand_equal_for_phi_arg_p (op0, larger))
1324 bound = op1;
1325 else if (operand_equal_for_phi_arg_p (op1, larger))
1326 bound = op0;
1327 else
1328 return false;
1329
1330 /* We need BOUND <= SMALLER. */
1331 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1332 bound, smaller)))
1333 return false;
1334 }
1335 else
1336 return false;
1337 }
1338
1339 /* Move the statement from the middle block. */
1340 gsi = gsi_last_bb (cond_bb);
1341 gsi_from = gsi_last_nondebug_bb (middle_bb);
1342 gsi_move_before (&gsi_from, &gsi);
1343 }
1344
1345 /* Create an SSA var to hold the min/max result. If we're the only
1346 things setting the target PHI, then we can clone the PHI
1347 variable. Otherwise we must create a new one. */
1348 result = PHI_RESULT (phi);
1349 if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
1350 result = duplicate_ssa_name (result, NULL);
1351 else
1352 result = make_ssa_name (TREE_TYPE (result));
1353
1354 /* Emit the statement to compute min/max. */
1355 new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
1356 gsi = gsi_last_bb (cond_bb);
1357 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1358
1359 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1360 reset_flow_sensitive_info_in_bb (cond_bb);
1361
1362 return true;
1363 }
1364
1365 /* The function abs_replacement does the main work of the absolute-value
1366 replacement. Return true if the replacement is done. Otherwise return
1367 false.
1368 bb is the basic block in which the replacement is going to be done. arg0
1369 is argument 0 from the phi. Likewise for arg1. */
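/* Illustrative sketch (example made up for exposition): the pattern
   recognized below is

     int
     f (int a)
     {
       return (a >= 0) ? a : -a;
     }

   which becomes a single ABS_EXPR; when the negated value is selected
   on the nonnegative side instead, e.g. (a > 0) ? -a : a, the result
   is an ABS_EXPR followed by a NEGATE_EXPR.  */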
1370
1371 static bool
1372 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1373 edge e0 ATTRIBUTE_UNUSED, edge e1,
1374 gimple *phi, tree arg0, tree arg1)
1375 {
1376 tree result;
1377 gassign *new_stmt;
1378 gimple *cond;
1379 gimple_stmt_iterator gsi;
1380 edge true_edge, false_edge;
1381 gimple *assign;
1382 edge e;
1383 tree rhs, lhs;
1384 bool negate;
1385 enum tree_code cond_code;
1386
1387 /* If the type says honor signed zeros we cannot do this
1388 optimization. */
1389 if (HONOR_SIGNED_ZEROS (arg1))
1390 return false;
1391
1392 /* OTHER_BLOCK must have only one executable statement which must have the
1393 form arg0 = -arg1 or arg1 = -arg0. */
1394
1395 assign = last_and_only_stmt (middle_bb);
1396 /* If we did not find the proper negation assignment, then we can not
1397 optimize. */
1398 if (assign == NULL)
1399 return false;
1400
1401 /* If we got here, then we have found the only executable statement
1402 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1403 arg1 = -arg0, then we cannot optimize. */
1404 if (gimple_code (assign) != GIMPLE_ASSIGN)
1405 return false;
1406
1407 lhs = gimple_assign_lhs (assign);
1408
1409 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1410 return false;
1411
1412 rhs = gimple_assign_rhs1 (assign);
1413
1414 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1415 if (!(lhs == arg0 && rhs == arg1)
1416 && !(lhs == arg1 && rhs == arg0))
1417 return false;
1418
1419 cond = last_stmt (cond_bb);
1420 result = PHI_RESULT (phi);
1421
1422 /* Only relationals comparing arg[01] against zero are interesting. */
1423 cond_code = gimple_cond_code (cond);
1424 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1425 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1426 return false;
1427
1428 /* Make sure the conditional is arg[01] OP y. */
1429 if (gimple_cond_lhs (cond) != rhs)
1430 return false;
1431
1432 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1433 ? real_zerop (gimple_cond_rhs (cond))
1434 : integer_zerop (gimple_cond_rhs (cond)))
1435 ;
1436 else
1437 return false;
1438
1439 /* We need to know which is the true edge and which is the false
1440 edge so that we know if we have abs or negative abs. */
1441 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1442
1443 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1444 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1445 the false edge goes to OTHER_BLOCK. */
1446 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1447 e = true_edge;
1448 else
1449 e = false_edge;
1450
1451 if (e->dest == middle_bb)
1452 negate = true;
1453 else
1454 negate = false;
1455
1456 result = duplicate_ssa_name (result, NULL);
1457
1458 if (negate)
1459 lhs = make_ssa_name (TREE_TYPE (result));
1460 else
1461 lhs = result;
1462
1463 /* Build the modify expression with abs expression. */
1464 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
1465
1466 gsi = gsi_last_bb (cond_bb);
1467 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1468
1469 if (negate)
1470 {
1471 /* Get the right GSI. We want to insert after the recently
1472 added ABS_EXPR statement (which we know is the first statement
1473 in the block). */
1474 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
1475
1476 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1477 }
1478
1479 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1480 reset_flow_sensitive_info_in_bb (cond_bb);
1481
1482 /* Note that we optimized this PHI. */
1483 return true;
1484 }
1485
1486 /* Auxiliary functions to determine the set of memory accesses which
1487 can't trap because they are preceded by accesses to the same memory
1488 portion. We do that for MEM_REFs, so we only need to track
1489 the SSA_NAME of the pointer indirectly referenced. The algorithm
1490 simply is a walk over all instructions in dominator order. When
1491 we see an MEM_REF we determine if we've already seen the same
1492 ref anywhere up to the root of the dominator tree. If we have, the
1493 current access can't trap. If we don't see any dominating access,
1494 the current access might trap, but might also make later accesses
1495 non-trapping, so we remember it. We need to be careful with loads
1496 or stores, for instance a load might not trap, while a store would,
1497 so if we see a dominating read access this doesn't mean that a later
1498 write access would not trap. Hence we also need to differentiate the
1499 type of access(es) seen.
1500
1501 ??? We currently are very conservative and assume that a load might
1502 trap even if a store doesn't (write-only memory). This probably is
1503 overly conservative. */
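/* Illustrative sketch (made-up example) of why the dominance walk
   matters for cond_store_replacement below.  In

     void
     f (int cond, int *p, int v)
     {
       *p = 0;
       if (cond)
         *p = v;
     }

   the conditional store is preceded by a dominating access to *p, so a
   load of *p cannot trap and the store may be made unconditional.  In

     void
     g (int cond, int *p, int v)
     {
       if (cond)
         *p = v;
     }

   there is no dominating access; inserting a load of *p on the other
   path could introduce a trap (e.g. if p is null when cond is false),
   so the transformation is not performed.  */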
1504
1505 /* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
1506 through it was seen, which would constitute a no-trap region for
1507 same accesses. */
1508 struct name_to_bb
1509 {
1510 unsigned int ssa_name_ver;
1511 unsigned int phase;
1512 bool store;
1513 HOST_WIDE_INT offset, size;
1514 basic_block bb;
1515 };
1516
1517 /* Hashtable helpers. */
1518
1519 struct ssa_names_hasher : free_ptr_hash <name_to_bb>
1520 {
1521 static inline hashval_t hash (const name_to_bb *);
1522 static inline bool equal (const name_to_bb *, const name_to_bb *);
1523 };
1524
1525 /* Used for quick clearing of the hash-table when we see calls.
1526 Hash entries with phase < nt_call_phase are invalid. */
1527 static unsigned int nt_call_phase;
1528
1529 /* The hash function. */
1530
1531 inline hashval_t
1532 ssa_names_hasher::hash (const name_to_bb *n)
1533 {
1534 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1535 ^ (n->offset << 6) ^ (n->size << 3);
1536 }
1537
1538 /* The equality function of *P1 and *P2. */
1539
1540 inline bool
1541 ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
1542 {
1543 return n1->ssa_name_ver == n2->ssa_name_ver
1544 && n1->store == n2->store
1545 && n1->offset == n2->offset
1546 && n1->size == n2->size;
1547 }
1548
1549 class nontrapping_dom_walker : public dom_walker
1550 {
1551 public:
1552 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1553 : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1554
1555 virtual edge before_dom_children (basic_block);
1556 virtual void after_dom_children (basic_block);
1557
1558 private:
1559
1560 /* We see the expression EXP in basic block BB. If it's an interesting
1561 expression (an MEM_REF through an SSA_NAME) possibly insert the
1562 expression into the set NONTRAP or the hash table of seen expressions.
1563 STORE is true if this expression is on the LHS, otherwise it's on
1564 the RHS. */
1565 void add_or_mark_expr (basic_block, tree, bool);
1566
1567 hash_set<tree> *m_nontrapping;
1568
1569 /* The hash table for remembering what we've seen. */
1570 hash_table<ssa_names_hasher> m_seen_ssa_names;
1571 };
1572
1573 /* Called by walk_dominator_tree, when entering the block BB. */
1574 edge
1575 nontrapping_dom_walker::before_dom_children (basic_block bb)
1576 {
1577 edge e;
1578 edge_iterator ei;
1579 gimple_stmt_iterator gsi;
1580
1581 /* If we haven't seen all our predecessors, clear the hash-table. */
1582 FOR_EACH_EDGE (e, ei, bb->preds)
1583 if ((((size_t)e->src->aux) & 2) == 0)
1584 {
1585 nt_call_phase++;
1586 break;
1587 }
1588
1589 /* Mark this BB as being on the path to dominator root and as visited. */
1590 bb->aux = (void*)(1 | 2);
1591
1592 /* And walk the statements in order. */
1593 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1594 {
1595 gimple *stmt = gsi_stmt (gsi);
1596
1597 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
1598 || (is_gimple_call (stmt)
1599 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
1600 nt_call_phase++;
1601 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1602 {
1603 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1604 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1605 }
1606 }
1607 return NULL;
1608 }
1609
1610 /* Called by walk_dominator_tree, when basic block BB is exited. */
1611 void
1612 nontrapping_dom_walker::after_dom_children (basic_block bb)
1613 {
1614 /* This BB isn't on the path to dominator root anymore. */
1615 bb->aux = (void*)2;
1616 }
1617
1618 /* We see the expression EXP in basic block BB. If it's an interesting
1619 expression (an MEM_REF through an SSA_NAME) possibly insert the
1620 expression into the set NONTRAP or the hash table of seen expressions.
1621 STORE is true if this expression is on the LHS, otherwise it's on
1622 the RHS. */
1623 void
1624 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1625 {
1626 HOST_WIDE_INT size;
1627
1628 if (TREE_CODE (exp) == MEM_REF
1629 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1630 && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1631 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1632 {
1633 tree name = TREE_OPERAND (exp, 0);
1634 struct name_to_bb map;
1635 name_to_bb **slot;
1636 struct name_to_bb *n2bb;
1637 basic_block found_bb = 0;
1638
1639 /* Try to find the last seen MEM_REF through the same
1640 SSA_NAME, which can trap. */
1641 map.ssa_name_ver = SSA_NAME_VERSION (name);
1642 map.phase = 0;
1643 map.bb = 0;
1644 map.store = store;
1645 map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1646 map.size = size;
1647
1648 slot = m_seen_ssa_names.find_slot (&map, INSERT);
1649 n2bb = *slot;
1650 if (n2bb && n2bb->phase >= nt_call_phase)
1651 found_bb = n2bb->bb;
1652
1653 /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1654 (it's in a basic block on the path from us to the dominator root)
1655 then we can't trap. */
1656 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1657 {
1658 m_nontrapping->add (exp);
1659 }
1660 else
1661 {
1662 /* EXP might trap, so insert it into the hash table. */
1663 if (n2bb)
1664 {
1665 n2bb->phase = nt_call_phase;
1666 n2bb->bb = bb;
1667 }
1668 else
1669 {
1670 n2bb = XNEW (struct name_to_bb);
1671 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1672 n2bb->phase = nt_call_phase;
1673 n2bb->bb = bb;
1674 n2bb->store = store;
1675 n2bb->offset = map.offset;
1676 n2bb->size = size;
1677 *slot = n2bb;
1678 }
1679 }
1680 }
1681 }
1682
1683 /* This is the entry point for gathering non-trapping memory accesses.
1684 It will do a dominator walk over the whole function, and it will
1685 make use of the bb->aux pointers. It returns a set of trees
1686 (the MEM_REFs themselves) which can't trap. */
1687 static hash_set<tree> *
1688 get_non_trapping (void)
1689 {
1690 nt_call_phase = 0;
1691 hash_set<tree> *nontrap = new hash_set<tree>;
1692 /* We're going to do a dominator walk, so ensure that we have
1693 dominance information. */
1694 calculate_dominance_info (CDI_DOMINATORS);
1695
1696 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1697 .walk (cfun->cfg->x_entry_block_ptr);
1698
1699 clear_aux_for_blocks ();
1700 return nontrap;
1701 }
1702
1703 /* Do the main work of conditional store replacement. We already know
1704 that the recognized pattern looks like so:
1705
1706 split:
1707 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1708 MIDDLE_BB:
1709 something
1710 fallthrough (edge E0)
1711 JOIN_BB:
1712 some more
1713
1714 We check that MIDDLE_BB contains only one store, that that store
1715 doesn't trap (not via NOTRAP, but via checking if an access to the same
1716 memory location dominates us) and that the store has a "simple" RHS. */
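/* Illustrative sketch (hypothetical GIMPLE, for exposition only) of the
   result of the transformation below: the store in MIDDLE_BB is
   removed, a load  cstore_1 = *p  is inserted on the edge E1 that
   bypasses MIDDLE_BB, and JOIN_BB gets

     cstore_2 = PHI <RHS (E0), cstore_1 (E1)>
     *p = cstore_2;

   so only a single, unconditional store remains.  The names cstore_1
   and cstore_2 are made up; the actual temporaries are created via
   make_temp_ssa_name with the "cstore" prefix.  */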
1717
1718 static bool
1719 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1720 edge e0, edge e1, hash_set<tree> *nontrap)
1721 {
1722 gimple *assign = last_and_only_stmt (middle_bb);
1723 tree lhs, rhs, name, name2;
1724 gphi *newphi;
1725 gassign *new_stmt;
1726 gimple_stmt_iterator gsi;
1727 source_location locus;
1728
1729 /* Check that middle_bb contains only one store. */
1730 if (!assign
1731 || !gimple_assign_single_p (assign)
1732 || gimple_has_volatile_ops (assign))
1733 return false;
1734
1735 locus = gimple_location (assign);
1736 lhs = gimple_assign_lhs (assign);
1737 rhs = gimple_assign_rhs1 (assign);
1738 if (TREE_CODE (lhs) != MEM_REF
1739 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1740 || !is_gimple_reg_type (TREE_TYPE (lhs)))
1741 return false;
1742
1743 /* Prove that we can move the store down. We could also check
1744 TREE_THIS_NOTRAP here, but in that case we could also move stores
1745 whose value is not readily available, which we want to avoid. */
1746 if (!nontrap->contains (lhs))
1747 return false;
1748
1749 /* Now we've checked the constraints, so do the transformation:
1750 1) Remove the single store. */
1751 gsi = gsi_for_stmt (assign);
1752 unlink_stmt_vdef (assign);
1753 gsi_remove (&gsi, true);
1754 release_defs (assign);
1755
1756 /* 2) Insert a load from the memory of the store to the temporary
1757 on the edge which did not contain the store. */
1758 lhs = unshare_expr (lhs);
1759 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1760 new_stmt = gimple_build_assign (name, lhs);
1761 gimple_set_location (new_stmt, locus);
1762 gsi_insert_on_edge (e1, new_stmt);
1763
1764 /* 3) Create a PHI node at the join block, with one argument
1765 holding the old RHS, and the other holding the temporary
1766 where we stored the old memory contents. */
1767 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1768 newphi = create_phi_node (name2, join_bb);
1769 add_phi_arg (newphi, rhs, e0, locus);
1770 add_phi_arg (newphi, name, e1, locus);
1771
1772 lhs = unshare_expr (lhs);
1773 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1774
1775 /* 4) Insert the new store of the PHI result.  */
1776 gsi = gsi_after_labels (join_bb);
1777 if (gsi_end_p (gsi))
1778 {
1779 gsi = gsi_last_bb (join_bb);
1780 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1781 }
1782 else
1783 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1784
1785 return true;
1786 }
1787
1788 /* Do the main work of conditional store replacement. */
1789
1790 static bool
1791 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1792 basic_block join_bb, gimple *then_assign,
1793 gimple *else_assign)
1794 {
1795 tree lhs_base, lhs, then_rhs, else_rhs, name;
1796 source_location then_locus, else_locus;
1797 gimple_stmt_iterator gsi;
1798 gphi *newphi;
1799 gassign *new_stmt;
1800
1801 if (then_assign == NULL
1802 || !gimple_assign_single_p (then_assign)
1803 || gimple_clobber_p (then_assign)
1804 || gimple_has_volatile_ops (then_assign)
1805 || else_assign == NULL
1806 || !gimple_assign_single_p (else_assign)
1807 || gimple_clobber_p (else_assign)
1808 || gimple_has_volatile_ops (else_assign))
1809 return false;
1810
1811 lhs = gimple_assign_lhs (then_assign);
1812 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1813 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1814 return false;
1815
1816 lhs_base = get_base_address (lhs);
1817 if (lhs_base == NULL_TREE
1818 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1819 return false;
1820
1821 then_rhs = gimple_assign_rhs1 (then_assign);
1822 else_rhs = gimple_assign_rhs1 (else_assign);
1823 then_locus = gimple_location (then_assign);
1824 else_locus = gimple_location (else_assign);
1825
1826 /* Now we've checked the constraints, so do the transformation:
1827 1) Remove the stores. */
1828 gsi = gsi_for_stmt (then_assign);
1829 unlink_stmt_vdef (then_assign);
1830 gsi_remove (&gsi, true);
1831 release_defs (then_assign);
1832
1833 gsi = gsi_for_stmt (else_assign);
1834 unlink_stmt_vdef (else_assign);
1835 gsi_remove (&gsi, true);
1836 release_defs (else_assign);
1837
1838 /* 2) Create a PHI node at the join block, with one argument
1839 holding the RHS stored in THEN_BB and the other holding the
1840 RHS stored in ELSE_BB.  */
1841 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1842 newphi = create_phi_node (name, join_bb);
1843 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1844 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1845
1846 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1847
1848 /* 3) Insert the new store of the PHI result.  */
1849 gsi = gsi_after_labels (join_bb);
1850 if (gsi_end_p (gsi))
1851 {
1852 gsi = gsi_last_bb (join_bb);
1853 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1854 }
1855 else
1856 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1857
1858 return true;
1859 }
1860
1861 /* Conditional store replacement. We already know
1862 that the recognized pattern looks like so:
1863
1864 split:
1865 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1866 THEN_BB:
1867 ...
1868 X = Y;
1869 ...
1870 goto JOIN_BB;
1871 ELSE_BB:
1872 ...
1873 X = Z;
1874 ...
1875 fallthrough (edge E0)
1876 JOIN_BB:
1877 some more
1878
1879 We check that it is safe to sink the store to JOIN_BB by verifying that
1880 there are no read-after-write or write-after-write dependencies in
1881 THEN_BB and ELSE_BB. */
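
/* For illustration only (identifier names are made-up examples): source
   such as

     void
     example (int *p, int cond, int a, int b)
     {
       if (cond)
         *p = a;
       else
         *p = b;
     }

   matches the pattern above; both stores are removed and a single
   store of PHI <a, b> is emitted in JOIN_BB.  */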
1882
1883 static bool
1884 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1885 basic_block join_bb)
1886 {
1887 gimple *then_assign = last_and_only_stmt (then_bb);
1888 gimple *else_assign = last_and_only_stmt (else_bb);
1889 vec<data_reference_p> then_datarefs, else_datarefs;
1890 vec<ddr_p> then_ddrs, else_ddrs;
1891 gimple *then_store, *else_store;
1892 bool found, ok = false, res;
1893 struct data_dependence_relation *ddr;
1894 data_reference_p then_dr, else_dr;
1895 int i, j;
1896 tree then_lhs, else_lhs;
1897 basic_block blocks[3];
1898
1899 if (MAX_STORES_TO_SINK == 0)
1900 return false;
1901
1902 /* Handle the case of a single statement in each of THEN_BB and ELSE_BB.  */
1903 if (then_assign && else_assign)
1904 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1905 then_assign, else_assign);
1906
1907 /* Find data references. */
1908 then_datarefs.create (1);
1909 else_datarefs.create (1);
1910 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1911 == chrec_dont_know)
1912 || !then_datarefs.length ()
1913 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1914 == chrec_dont_know)
1915 || !else_datarefs.length ())
1916 {
1917 free_data_refs (then_datarefs);
1918 free_data_refs (else_datarefs);
1919 return false;
1920 }
1921
1922 /* Find pairs of stores with equal LHS. */
1923 auto_vec<gimple *, 1> then_stores, else_stores;
1924 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
1925 {
1926 if (DR_IS_READ (then_dr))
1927 continue;
1928
1929 then_store = DR_STMT (then_dr);
1930 then_lhs = gimple_get_lhs (then_store);
1931 if (then_lhs == NULL_TREE)
1932 continue;
1933 found = false;
1934
1935 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
1936 {
1937 if (DR_IS_READ (else_dr))
1938 continue;
1939
1940 else_store = DR_STMT (else_dr);
1941 else_lhs = gimple_get_lhs (else_store);
1942 if (else_lhs == NULL_TREE)
1943 continue;
1944
1945 if (operand_equal_p (then_lhs, else_lhs, 0))
1946 {
1947 found = true;
1948 break;
1949 }
1950 }
1951
1952 if (!found)
1953 continue;
1954
1955 then_stores.safe_push (then_store);
1956 else_stores.safe_push (else_store);
1957 }
1958
1959 /* No pairs of stores found, or more than MAX_STORES_TO_SINK of them.  */
1960 if (!then_stores.length ()
1961 || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
1962 {
1963 free_data_refs (then_datarefs);
1964 free_data_refs (else_datarefs);
1965 return false;
1966 }
1967
1968 /* Compute and check data dependencies in both basic blocks. */
1969 then_ddrs.create (1);
1970 else_ddrs.create (1);
1971 if (!compute_all_dependences (then_datarefs, &then_ddrs,
1972 vNULL, false)
1973 || !compute_all_dependences (else_datarefs, &else_ddrs,
1974 vNULL, false))
1975 {
1976 free_dependence_relations (then_ddrs);
1977 free_dependence_relations (else_ddrs);
1978 free_data_refs (then_datarefs);
1979 free_data_refs (else_datarefs);
1980 return false;
1981 }
1982 blocks[0] = then_bb;
1983 blocks[1] = else_bb;
1984 blocks[2] = join_bb;
1985 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
1986
1987 /* Check that there are no read-after-write or write-after-write dependencies
1988 in THEN_BB. */
1989 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
1990 {
1991 struct data_reference *dra = DDR_A (ddr);
1992 struct data_reference *drb = DDR_B (ddr);
1993
1994 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1995 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1996 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1997 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1998 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1999 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2000 {
2001 free_dependence_relations (then_ddrs);
2002 free_dependence_relations (else_ddrs);
2003 free_data_refs (then_datarefs);
2004 free_data_refs (else_datarefs);
2005 return false;
2006 }
2007 }
2008
2009 /* Check that there are no read-after-write or write-after-write dependencies
2010 in ELSE_BB. */
2011 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2012 {
2013 struct data_reference *dra = DDR_A (ddr);
2014 struct data_reference *drb = DDR_B (ddr);
2015
2016 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2017 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2018 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2019 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2020 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2021 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2022 {
2023 free_dependence_relations (then_ddrs);
2024 free_dependence_relations (else_ddrs);
2025 free_data_refs (then_datarefs);
2026 free_data_refs (else_datarefs);
2027 return false;
2028 }
2029 }
2030
2031 /* Sink the stores with the same LHS.  */
2032 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2033 {
2034 else_store = else_stores[i];
2035 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2036 then_store, else_store);
2037 ok = ok || res;
2038 }
2039
2040 free_dependence_relations (then_ddrs);
2041 free_dependence_relations (else_ddrs);
2042 free_data_refs (then_datarefs);
2043 free_data_refs (else_datarefs);
2044
2045 return ok;
2046 }
2047
2048 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2049
2050 static bool
2051 local_mem_dependence (gimple *stmt, basic_block bb)
2052 {
2053 tree vuse = gimple_vuse (stmt);
2054 gimple *def;
2055
2056 if (!vuse)
2057 return false;
2058
2059 def = SSA_NAME_DEF_STMT (vuse);
2060 return (def && gimple_bb (def) == bb);
2061 }
2062
2063 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2064 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2065 and BB3 rejoins control flow following BB1 and BB2, look for
2066 opportunities to hoist loads as follows. If BB3 contains a PHI of
2067 two loads, one each occurring in BB1 and BB2, and the loads are
2068 provably of adjacent fields in the same structure, then move both
2069 loads into BB0. Of course this can only be done if there are no
2070 dependencies preventing such motion.
2071
2072 One of the hoisted loads will always be speculative, so the
2073 transformation is currently conservative:
2074
2075 - The fields must be strictly adjacent.
2076 - The two fields must occupy a single memory block that is
2077 guaranteed to not cross a page boundary.
2078
2079 The latter is difficult to prove, as such memory blocks should be
2080 aligned on the minimum of the stack alignment boundary and the
2081 alignment guaranteed by heap allocation interfaces.  Thus we rely
2082 on a parameter for the alignment value.
2083
2084 Provided a good value is used for that parameter, the first
2085 restriction could possibly be relaxed.  */
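
/* For illustration only (identifier names are made-up examples): the
   typical source shape is a diamond that loads one of two adjacent
   fields of the same object:

     struct S { int a; int b; };

     int
     example (struct S *p, int cond)
     {
       int x;
       if (cond)
         x = p->a;
       else
         x = p->b;
       return x;
     }

   When both fields fit within one cache line (per the alignment
   parameter mentioned above), both loads may be hoisted ahead of the
   branch so that a conditional move can be used.  */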
2086
2087 static void
2088 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2089 basic_block bb2, basic_block bb3)
2090 {
2091 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2092 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2093 gphi_iterator gsi;
2094
2095 /* Walk the phis in bb3 looking for an opportunity.  We are looking
2096 for phis of two SSA names, one defined in bb1 and the other defined
2097 in bb2.  */
2098 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2099 {
2100 gphi *phi_stmt = gsi.phi ();
2101 gimple *def1, *def2;
2102 tree arg1, arg2, ref1, ref2, field1, field2;
2103 tree tree_offset1, tree_offset2, tree_size2, next;
2104 int offset1, offset2, size2;
2105 unsigned align1;
2106 gimple_stmt_iterator gsi2;
2107 basic_block bb_for_def1, bb_for_def2;
2108
2109 if (gimple_phi_num_args (phi_stmt) != 2
2110 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2111 continue;
2112
2113 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2114 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2115
2116 if (TREE_CODE (arg1) != SSA_NAME
2117 || TREE_CODE (arg2) != SSA_NAME
2118 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2119 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2120 continue;
2121
2122 def1 = SSA_NAME_DEF_STMT (arg1);
2123 def2 = SSA_NAME_DEF_STMT (arg2);
2124
2125 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2126 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2127 continue;
2128
2129 /* Check the mode of the arguments to be sure a conditional move
2130 can be generated for it. */
2131 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2132 == CODE_FOR_nothing)
2133 continue;
2134
2135 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2136 if (!gimple_assign_single_p (def1)
2137 || !gimple_assign_single_p (def2)
2138 || gimple_has_volatile_ops (def1)
2139 || gimple_has_volatile_ops (def2))
2140 continue;
2141
2142 ref1 = gimple_assign_rhs1 (def1);
2143 ref2 = gimple_assign_rhs1 (def2);
2144
2145 if (TREE_CODE (ref1) != COMPONENT_REF
2146 || TREE_CODE (ref2) != COMPONENT_REF)
2147 continue;
2148
2149 /* The zeroth operand of the two component references must be
2150 identical. It is not sufficient to compare get_base_address of
2151 the two references, because this could allow for different
2152 elements of the same array in the two trees. It is not safe to
2153 assume that the existence of one array element implies the
2154 existence of a different one. */
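/* For instance (illustrative names), s.a[i].f and s.a[j].g share a
   base address but name different array elements, so they must not
   be accepted here.  */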
2155 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2156 continue;
2157
2158 field1 = TREE_OPERAND (ref1, 1);
2159 field2 = TREE_OPERAND (ref2, 1);
2160
2161 /* Check for field adjacency, and ensure field1 comes first. */
2162 for (next = DECL_CHAIN (field1);
2163 next && TREE_CODE (next) != FIELD_DECL;
2164 next = DECL_CHAIN (next))
2165 ;
2166
2167 if (next != field2)
2168 {
2169 for (next = DECL_CHAIN (field2);
2170 next && TREE_CODE (next) != FIELD_DECL;
2171 next = DECL_CHAIN (next))
2172 ;
2173
2174 if (next != field1)
2175 continue;
2176
2177 std::swap (field1, field2);
2178 std::swap (def1, def2);
2179 }
2180
2181 bb_for_def1 = gimple_bb (def1);
2182 bb_for_def2 = gimple_bb (def2);
2183
2184 /* Check for proper alignment of the first field. */
2185 tree_offset1 = bit_position (field1);
2186 tree_offset2 = bit_position (field2);
2187 tree_size2 = DECL_SIZE (field2);
2188
2189 if (!tree_fits_uhwi_p (tree_offset1)
2190 || !tree_fits_uhwi_p (tree_offset2)
2191 || !tree_fits_uhwi_p (tree_size2))
2192 continue;
2193
2194 offset1 = tree_to_uhwi (tree_offset1);
2195 offset2 = tree_to_uhwi (tree_offset2);
2196 size2 = tree_to_uhwi (tree_size2);
2197 align1 = DECL_ALIGN (field1) % param_align_bits;
2198
2199 if (offset1 % BITS_PER_UNIT != 0)
2200 continue;
2201
2202 /* For profitability, the two field references should fit within
2203 a single cache line. */
2204 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2205 continue;
2206
2207 /* The two expressions cannot be dependent upon vdefs defined
2208 in bb1/bb2. */
2209 if (local_mem_dependence (def1, bb_for_def1)
2210 || local_mem_dependence (def2, bb_for_def2))
2211 continue;
2212
2213 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2214 bb0. We hoist the first one first so that a cache miss is handled
2215 efficiently regardless of hardware cache-fill policy. */
2216 gsi2 = gsi_for_stmt (def1);
2217 gsi_move_to_bb_end (&gsi2, bb0);
2218 gsi2 = gsi_for_stmt (def2);
2219 gsi_move_to_bb_end (&gsi2, bb0);
2220
2221 if (dump_file && (dump_flags & TDF_DETAILS))
2222 {
2223 fprintf (dump_file,
2224 "\nHoisting adjacent loads from %d and %d into %d: \n",
2225 bb_for_def1->index, bb_for_def2->index, bb0->index);
2226 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2227 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2228 }
2229 }
2230 }
2231
2232 /* Determine whether we should attempt to hoist adjacent loads out of
2233 diamond patterns in pass_phiopt. Always hoist loads if
2234 -fhoist-adjacent-loads is specified and the target machine has
2235 both a conditional move instruction and a defined cache line size. */
2236
2237 static bool
2238 gate_hoist_loads (void)
2239 {
2240 return (flag_hoist_adjacent_loads == 1
2241 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2242 && HAVE_conditional_move);
2243 }
2244
2245 /* This pass tries to replace an if-then-else block with an
2246 assignment. We have four kinds of transformations. Some of these
2247 transformations are also performed by the ifcvt RTL optimizer.
2248
2249 Conditional Replacement
2250 -----------------------
2251
2252 This transformation, implemented in conditional_replacement,
2253 replaces
2254
2255 bb0:
2256 if (cond) goto bb2; else goto bb1;
2257 bb1:
2258 bb2:
2259 x = PHI <0 (bb1), 1 (bb0), ...>;
2260
2261 with
2262
2263 bb0:
2264 x' = cond;
2265 goto bb2;
2266 bb2:
2267 x = PHI <x' (bb0), ...>;
2268
2269 We remove bb1 as it becomes unreachable. This occurs often due to
2270 gimplification of conditionals.
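
   For illustration (identifier names are only examples), C source such as

     if (cond)
       x = 1;
     else
       x = 0;

   gimplifies to the pattern above and is rewritten to the equivalent
   of x = (cond != 0).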
2271
2272 Value Replacement
2273 -----------------
2274
2275 This transformation, implemented in value_replacement, replaces
2276
2277 bb0:
2278 if (a != b) goto bb2; else goto bb1;
2279 bb1:
2280 bb2:
2281 x = PHI <a (bb1), b (bb0), ...>;
2282
2283 with
2284
2285 bb0:
2286 bb2:
2287 x = PHI <b (bb0), ...>;
2288
2289 This opportunity can sometimes occur as a result of other
2290 optimizations.
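
   For illustration (identifier names are only examples), source such as

     if (a == b)
       x = a;
     else
       x = b;

   reduces to x = b, because on the first arm a is known to equal b.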
2291
2292
2293 Another case caught by value replacement looks like this:
2294
2295 bb0:
2296 t1 = a == CONST;
2297 t2 = b > c;
2298 t3 = t1 & t2;
2299 if (t3 != 0) goto bb1; else goto bb2;
2300 bb1:
2301 bb2:
2302 x = PHI <CONST (bb1), a (bb0)>
2303
2304 Gets replaced with:
2305 bb0:
2306 bb2:
2307 t1 = a == CONST;
2308 t2 = b > c;
2309 t3 = t1 & t2;
2310 x = a;
2311
2312 ABS Replacement
2313 ---------------
2314
2315 This transformation, implemented in abs_replacement, replaces
2316
2317 bb0:
2318 if (a >= 0) goto bb2; else goto bb1;
2319 bb1:
2320 x = -a;
2321 bb2:
2322 x = PHI <x (bb1), a (bb0), ...>;
2323
2324 with
2325
2326 bb0:
2327 x' = ABS_EXPR< a >;
2328 bb2:
2329 x = PHI <x' (bb0), ...>;
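
   For illustration (identifier names are only examples), source such as

     if (a >= 0)
       x = a;
     else
       x = -a;

   is rewritten to a single x = ABS_EXPR <a>.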
2330
2331 MIN/MAX Replacement
2332 -------------------
2333
2334 This transformation, implemented in minmax_replacement, replaces
2335
2336 bb0:
2337 if (a <= b) goto bb2; else goto bb1;
2338 bb1:
2339 bb2:
2340 x = PHI <b (bb1), a (bb0), ...>;
2341
2342 with
2343
2344 bb0:
2345 x' = MIN_EXPR (a, b)
2346 bb2:
2347 x = PHI <x' (bb0), ...>;
2348
2349 A similar transformation is done for MAX_EXPR.
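
   For illustration (identifier names are only examples), source such as

     x = (a <= b) ? a : b;

   is rewritten to x = MIN_EXPR <a, b>, and the mirrored form to
   MAX_EXPR, when the operand types make that safe.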
2350
2351
2352 This pass also performs a fifth transformation of a slightly different
2353 flavor.
2354
2355 Factor conversion in COND_EXPR
2356 ------------------------------
2357
2358 This transformation factors the conversion out of COND_EXPR with
2359 factor_out_conditional_conversion.
2360
2361 For example:
2362 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2363 <bb 3>:
2364 tmp = (int) a;
2365 <bb 4>:
2366 tmp = PHI <tmp, CST>
2367
2368 Into:
2369 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2370 <bb 3>:
2371 <bb 4>:
2372 a' = PHI <a, CST>
2373 tmp = (int) a';
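
   For illustration (identifier names are only examples), source such as

     short a;  int tmp;
     if (a <= 42)
       tmp = (int) a;
     else
       tmp = 42;

   has the widening conversion factored out of the arms, so the PHI is
   over the narrower values and a single conversion follows it.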
2374
2375 Adjacent Load Hoisting
2376 ----------------------
2377
2378 This transformation replaces
2379
2380 bb0:
2381 if (...) goto bb2; else goto bb1;
2382 bb1:
2383 x1 = (<expr>).field1;
2384 goto bb3;
2385 bb2:
2386 x2 = (<expr>).field2;
2387 bb3:
2388 # x = PHI <x1, x2>;
2389
2390 with
2391
2392 bb0:
2393 x1 = (<expr>).field1;
2394 x2 = (<expr>).field2;
2395 if (...) goto bb2; else goto bb1;
2396 bb1:
2397 goto bb3;
2398 bb2:
2399 bb3:
2400 # x = PHI <x1, x2>;
2401
2402 The purpose of this transformation is to enable generation of conditional
2403 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
2404 the loads is speculative, the transformation is restricted to very
2405 specific cases to avoid introducing a page fault. We are looking for
2406 the common idiom:
2407
2408 if (...)
2409 x = y->left;
2410 else
2411 x = y->right;
2412
2413 where left and right are typically adjacent pointers in a tree structure. */
2414
2415 namespace {
2416
2417 const pass_data pass_data_phiopt =
2418 {
2419 GIMPLE_PASS, /* type */
2420 "phiopt", /* name */
2421 OPTGROUP_NONE, /* optinfo_flags */
2422 TV_TREE_PHIOPT, /* tv_id */
2423 ( PROP_cfg | PROP_ssa ), /* properties_required */
2424 0, /* properties_provided */
2425 0, /* properties_destroyed */
2426 0, /* todo_flags_start */
2427 0, /* todo_flags_finish */
2428 };
2429
2430 class pass_phiopt : public gimple_opt_pass
2431 {
2432 public:
2433 pass_phiopt (gcc::context *ctxt)
2434 : gimple_opt_pass (pass_data_phiopt, ctxt)
2435 {}
2436
2437 /* opt_pass methods: */
2438 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
2439 virtual bool gate (function *) { return flag_ssa_phiopt; }
2440 virtual unsigned int execute (function *)
2441 {
2442 return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
2443 }
2444
2445 }; // class pass_phiopt
2446
2447 } // anon namespace
2448
2449 gimple_opt_pass *
2450 make_pass_phiopt (gcc::context *ctxt)
2451 {
2452 return new pass_phiopt (ctxt);
2453 }
2454
2455 namespace {
2456
2457 const pass_data pass_data_cselim =
2458 {
2459 GIMPLE_PASS, /* type */
2460 "cselim", /* name */
2461 OPTGROUP_NONE, /* optinfo_flags */
2462 TV_TREE_PHIOPT, /* tv_id */
2463 ( PROP_cfg | PROP_ssa ), /* properties_required */
2464 0, /* properties_provided */
2465 0, /* properties_destroyed */
2466 0, /* todo_flags_start */
2467 0, /* todo_flags_finish */
2468 };
2469
2470 class pass_cselim : public gimple_opt_pass
2471 {
2472 public:
2473 pass_cselim (gcc::context *ctxt)
2474 : gimple_opt_pass (pass_data_cselim, ctxt)
2475 {}
2476
2477 /* opt_pass methods: */
2478 virtual bool gate (function *) { return flag_tree_cselim; }
2479 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
2480
2481 }; // class pass_cselim
2482
2483 } // anon namespace
2484
2485 gimple_opt_pass *
2486 make_pass_cselim (gcc::context *ctxt)
2487 {
2488 return new pass_cselim (ctxt);
2489 }