gcc/tree-ssa-phiopt.c (from thirdparty/gcc.git, commit "Try inverted comparison for match_simplify in phiopt")
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "tree-ssa.h"
32 #include "optabs-tree.h"
33 #include "insn-config.h"
34 #include "gimple-pretty-print.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-cfg.h"
42 #include "tree-dfa.h"
43 #include "domwalk.h"
44 #include "cfgloop.h"
45 #include "tree-data-ref.h"
46 #include "tree-scalar-evolution.h"
47 #include "tree-inline.h"
48 #include "case-cfn-macros.h"
49 #include "tree-eh.h"
50 #include "gimple-fold.h"
51 #include "internal-fn.h"
52 #include "gimple-range.h"
53 #include "gimple-match.h"
54
55 static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
56 static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
57 tree, tree);
58 static bool match_simplify_replacement (basic_block, basic_block,
59 edge, edge, gphi *, tree, tree, bool);
60 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
61 gimple *);
62 static int value_replacement (basic_block, basic_block,
63 edge, edge, gphi *, tree, tree);
64 static bool minmax_replacement (basic_block, basic_block,
65 edge, edge, gphi *, tree, tree);
66 static bool abs_replacement (basic_block, basic_block,
67 edge, edge, gphi *, tree, tree);
68 static bool spaceship_replacement (basic_block, basic_block,
69 edge, edge, gphi *, tree, tree);
70 static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
71 edge, edge, gphi *,
72 tree, tree);
73 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
74 hash_set<tree> *);
75 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
76 static hash_set<tree> * get_non_trapping ();
77 static void replace_phi_edge_with_variable (basic_block, edge, gphi *, tree);
78 static void hoist_adjacent_loads (basic_block, basic_block,
79 basic_block, basic_block);
80 static bool gate_hoist_loads (void);
81
82 /* This pass tries to transform conditional stores into unconditional
83 ones, enabling further simplifications with the simpler then and else
84 blocks. In particular it replaces this:
85
86 bb0:
87 if (cond) goto bb2; else goto bb1;
88 bb1:
89 *p = RHS;
90 bb2:
91
92 with
93
94 bb0:
95 if (cond) goto bb1; else goto bb2;
96 bb1:
97 condtmp' = *p;
98 bb2:
99 condtmp = PHI <RHS, condtmp'>
100 *p = condtmp;
101
102 This transformation can only be done under several constraints,
103 documented below. It also replaces:
104
105 bb0:
106 if (cond) goto bb2; else goto bb1;
107 bb1:
108 *p = RHS1;
109 goto bb3;
110 bb2:
111 *p = RHS2;
112 bb3:
113
114 with
115
116 bb0:
117 if (cond) goto bb3; else goto bb1;
118 bb1:
119 bb3:
120 condtmp = PHI <RHS1, RHS2>
121 *p = condtmp; */
122
123 static unsigned int
124 tree_ssa_cs_elim (void)
125 {
126 unsigned todo;
127 /* ??? We are not interested in loop related info, but the following
128 will create it, ICEing as we didn't init loops with pre-headers.
129 An interfacing issue of find_data_references_in_bb. */
130 loop_optimizer_init (LOOPS_NORMAL);
131 scev_initialize ();
132 todo = tree_ssa_phiopt_worker (true, false, false);
133 scev_finalize ();
134 loop_optimizer_finalize ();
135 return todo;
136 }
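
/* Illustrative sketch, not part of the pass: a source-level view of the
   conditional store replacement documented above.  The function name is
   hypothetical.  The pass only performs this when *p is known not to trap
   (see get_non_trapping), since the rewritten form reads *p on the path
   that previously did not touch it.  */

static inline void
phiopt_example_cond_store (int cond, int *p, int rhs)
{
  int oldval = *p;                     /* condtmp' = *p  */
  int condtmp = cond ? rhs : oldval;   /* condtmp = PHI <RHS, condtmp'>  */
  *p = condtmp;                        /* now an unconditional store  */
}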
137
138 /* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1. */
139
140 static gphi *
141 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
142 {
143 gimple_stmt_iterator i;
144 gphi *phi = NULL;
145 if (gimple_seq_singleton_p (seq))
146 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
147 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
148 {
149 gphi *p = as_a <gphi *> (gsi_stmt (i));
150 /* If the PHI arguments are equal then we can skip this PHI. */
151 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
152 gimple_phi_arg_def (p, e1->dest_idx)))
153 continue;
154
155 /* If we already found a PHI whose arguments for the two edges
156 differ, then there is no singleton for these edges; return NULL. */
157 if (phi)
158 return NULL;
159
160 phi = p;
161 }
162 return phi;
163 }
164
165 /* The core routine of conditional store replacement and normal
166 phi optimizations. Both share much of the infrastructure in how
167 to match applicable basic block patterns. DO_STORE_ELIM is true
168 when we want to do conditional store replacement, false otherwise.
169 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
170 of diamond control flow patterns, false otherwise. */
171 static unsigned int
172 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
173 {
174 basic_block bb;
175 basic_block *bb_order;
176 unsigned n, i;
177 bool cfgchanged = false;
178 hash_set<tree> *nontrap = 0;
179
180 calculate_dominance_info (CDI_DOMINATORS);
181
182 if (do_store_elim)
183 /* Calculate the set of non-trapping memory accesses. */
184 nontrap = get_non_trapping ();
185
186 /* Search every basic block for COND_EXPR we may be able to optimize.
187
188 We walk the blocks in order that guarantees that a block with
189 a single predecessor is processed before the predecessor.
190 This ensures that we collapse inner ifs before visiting the
191 outer ones, and also that we do not try to visit a removed
192 block. */
193 bb_order = single_pred_before_succ_order ();
194 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
195
196 for (i = 0; i < n; i++)
197 {
198 gimple *cond_stmt;
199 gphi *phi;
200 basic_block bb1, bb2;
201 edge e1, e2;
202 tree arg0, arg1;
203
204 bb = bb_order[i];
205
206 cond_stmt = last_stmt (bb);
207 /* Check to see if the last statement is a GIMPLE_COND. */
208 if (!cond_stmt
209 || gimple_code (cond_stmt) != GIMPLE_COND)
210 continue;
211
212 e1 = EDGE_SUCC (bb, 0);
213 bb1 = e1->dest;
214 e2 = EDGE_SUCC (bb, 1);
215 bb2 = e2->dest;
216
217 /* We cannot do the optimization on abnormal edges. */
218 if ((e1->flags & EDGE_ABNORMAL) != 0
219 || (e2->flags & EDGE_ABNORMAL) != 0)
220 continue;
221
222 /* Punt if bb1 has no successors, or bb2 is missing or has none. */
223 if (EDGE_COUNT (bb1->succs) == 0
224 || bb2 == NULL
225 || EDGE_COUNT (bb2->succs) == 0)
226 continue;
227
228 /* Find the bb which falls through to the other. */
229 if (EDGE_SUCC (bb1, 0)->dest == bb2)
230 ;
231 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
232 {
233 std::swap (bb1, bb2);
234 std::swap (e1, e2);
235 }
236 else if (do_store_elim
237 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
238 {
239 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
240
241 if (!single_succ_p (bb1)
242 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
243 || !single_succ_p (bb2)
244 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
245 || EDGE_COUNT (bb3->preds) != 2)
246 continue;
247 if (cond_if_else_store_replacement (bb1, bb2, bb3))
248 cfgchanged = true;
249 continue;
250 }
251 else if (do_hoist_loads
252 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
253 {
254 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
255
256 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
257 && single_succ_p (bb1)
258 && single_succ_p (bb2)
259 && single_pred_p (bb1)
260 && single_pred_p (bb2)
261 && EDGE_COUNT (bb->succs) == 2
262 && EDGE_COUNT (bb3->preds) == 2
263 /* If one edge or the other is dominant, a conditional move
264 is likely to perform worse than the well-predicted branch. */
265 && !predictable_edge_p (EDGE_SUCC (bb, 0))
266 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
267 hoist_adjacent_loads (bb, bb1, bb2, bb3);
268 continue;
269 }
270 else
271 continue;
272
273 e1 = EDGE_SUCC (bb1, 0);
274
275 /* Make sure that bb1 is just a fall through. */
276 if (!single_succ_p (bb1)
277 || (e1->flags & EDGE_FALLTHRU) == 0)
278 continue;
279
280 /* Also make sure that bb1 has only one predecessor and that it
281 is bb. */
282 if (!single_pred_p (bb1)
283 || single_pred (bb1) != bb)
284 continue;
285
286 if (do_store_elim)
287 {
288 /* bb1 is the middle block, bb2 the join block, bb the split block,
289 e1 the fallthrough edge from bb1 to bb2. We can't do the
290 optimization if the join block has more than two predecessors. */
291 if (EDGE_COUNT (bb2->preds) > 2)
292 continue;
293 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
294 cfgchanged = true;
295 }
296 else
297 {
298 gimple_seq phis = phi_nodes (bb2);
299 gimple_stmt_iterator gsi;
300 bool candorest = true;
301
302 /* Value replacement can work with more than one PHI
303 so try that first. */
304 if (!early_p)
305 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
306 {
307 phi = as_a <gphi *> (gsi_stmt (gsi));
308 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
309 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
310 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
311 {
312 candorest = false;
313 cfgchanged = true;
314 break;
315 }
316 }
317
318 if (!candorest)
319 continue;
320
321 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
322 if (!phi)
323 continue;
324
325 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
326 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
327
328 /* Something is wrong if we cannot find the arguments in the PHI
329 node. */
330 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
331
332 gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
333 arg0, arg1,
334 cond_stmt);
335 if (newphi != NULL)
336 {
337 phi = newphi;
338 /* factor_out_conditional_conversion may create a new PHI in
339 BB2 and eliminate an existing PHI in BB2. Recompute values
340 that may be affected by that change. */
341 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
342 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
343 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
344 }
345
346 /* Do the replacement of conditional if it can be done. */
347 if (!early_p && two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
348 cfgchanged = true;
349 else if (match_simplify_replacement (bb, bb1, e1, e2, phi,
350 arg0, arg1,
351 early_p))
352 cfgchanged = true;
353 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
354 cfgchanged = true;
355 else if (!early_p
356 && cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
357 e2, phi, arg0,
358 arg1))
359 cfgchanged = true;
360 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
361 cfgchanged = true;
362 else if (spaceship_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
363 cfgchanged = true;
364 }
365 }
366
367 free (bb_order);
368
369 if (do_store_elim)
370 delete nontrap;
371 /* If the CFG has changed, we should cleanup the CFG. */
372 if (cfgchanged && do_store_elim)
373 {
374 /* In cond-store replacement we have added some loads on edges
375 and new VOPS (as we moved the store, and created a load). */
376 gsi_commit_edge_inserts ();
377 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
378 }
379 else if (cfgchanged)
380 return TODO_cleanup_cfg;
381 return 0;
382 }
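
/* Illustrative sketch, not part of the pass: the half-diamond shape the
   worker above matches.  The function name is hypothetical.  COND_BB ends
   in a GIMPLE_COND, the middle block is a single-predecessor fallthrough,
   and the join block holds the PHI that the *_replacement routines try to
   rewrite into straightline code.  */

static inline int
phiopt_example_half_diamond (int a, int b)
{
  int r = a;   /* value carried along the direct edge to the join  */
  if (a < b)   /* COND_BB: ends in a GIMPLE_COND  */
    r = b;     /* middle block: feeds one PHI argument  */
  return r;    /* join: r_3 = PHI <a, b>, rewritten to MAX_EXPR <a, b>  */
}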
383
384 /* Replace PHI node element whose edge is E in block BB with variable NEW.
385 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
386 is known to have two edges, one of which must reach BB). */
387
388 static void
389 replace_phi_edge_with_variable (basic_block cond_block,
390 edge e, gphi *phi, tree new_tree)
391 {
392 basic_block bb = gimple_bb (phi);
393 basic_block block_to_remove;
394 gimple_stmt_iterator gsi;
395 tree phi_result = PHI_RESULT (phi);
396
397 /* Duplicate range info if we're the only thing setting the target PHI.
398 This is needed as later on the new_tree will be replacing
399 the assignment of the PHI.
400 For an example:
401 bb1:
402 _4 = min<a_1, 255>
403 goto bb2
404 bb2:
405 # range<-INF,255>
406 a_3 = PHI<_4(1)>
407 bb3:
408
409 use(a_3)
410 And _4 gets propagated into the use of a_3, losing the range info.
411 This can't be done for more than 2 incoming edges as the propagation
412 won't happen. */
413 if (TREE_CODE (new_tree) == SSA_NAME
414 && EDGE_COUNT (gimple_bb (phi)->preds) == 2
415 && INTEGRAL_TYPE_P (TREE_TYPE (phi_result))
416 && !SSA_NAME_RANGE_INFO (new_tree)
417 && SSA_NAME_RANGE_INFO (phi_result))
418 duplicate_ssa_name_range_info (new_tree,
419 SSA_NAME_RANGE_TYPE (phi_result),
420 SSA_NAME_RANGE_INFO (phi_result));
421
422 /* Change the PHI argument to new. */
423 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
424
425 /* Remove the empty basic block. */
426 if (EDGE_SUCC (cond_block, 0)->dest == bb)
427 {
428 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
429 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
430 EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();
431
432 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
433 }
434 else
435 {
436 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
437 EDGE_SUCC (cond_block, 1)->flags
438 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
439 EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();
440
441 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
442 }
443 delete_basic_block (block_to_remove);
444
445 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
446 gsi = gsi_last_bb (cond_block);
447 gsi_remove (&gsi, true);
448
449 statistics_counter_event (cfun, "Replace PHI with variable", 1);
450
451 if (dump_file && (dump_flags & TDF_DETAILS))
452 fprintf (dump_file,
453 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
454 cond_block->index,
455 bb->index);
456 }
457
458 /* PR66726: Factor conversion out of COND_EXPR. If the arguments of the PHI
459 stmt are conversions, factor the conversion out and apply it to the
460 result of the PHI stmt instead. COND_STMT is the controlling predicate.
461 Return the newly-created PHI, if any. */
462
463 static gphi *
464 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
465 tree arg0, tree arg1, gimple *cond_stmt)
466 {
467 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
468 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
469 tree temp, result;
470 gphi *newphi;
471 gimple_stmt_iterator gsi, gsi_for_def;
472 location_t locus = gimple_location (phi);
473 enum tree_code convert_code;
474
475 /* Handle only PHI statements with two arguments. TODO: If all
476 other arguments to PHI are INTEGER_CST or if their defining
477 statements have the same unary operation, we can handle more
478 than two arguments too. */
479 if (gimple_phi_num_args (phi) != 2)
480 return NULL;
481
482 /* First canonicalize to simplify tests. */
483 if (TREE_CODE (arg0) != SSA_NAME)
484 {
485 std::swap (arg0, arg1);
486 std::swap (e0, e1);
487 }
488
489 if (TREE_CODE (arg0) != SSA_NAME
490 || (TREE_CODE (arg1) != SSA_NAME
491 && TREE_CODE (arg1) != INTEGER_CST))
492 return NULL;
493
494 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
495 a conversion. */
496 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
497 if (!gimple_assign_cast_p (arg0_def_stmt))
498 return NULL;
499
500 /* Use the RHS as new_arg0. */
501 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
502 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
503 if (convert_code == VIEW_CONVERT_EXPR)
504 {
505 new_arg0 = TREE_OPERAND (new_arg0, 0);
506 if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
507 return NULL;
508 }
509 if (TREE_CODE (new_arg0) == SSA_NAME
510 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg0))
511 return NULL;
512
513 if (TREE_CODE (arg1) == SSA_NAME)
514 {
515 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
516 is a conversion. */
517 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
518 if (!is_gimple_assign (arg1_def_stmt)
519 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
520 return NULL;
521
522 /* Either arg1_def_stmt or arg0_def_stmt should be conditional. */
523 if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt))
524 && dominated_by_p (CDI_DOMINATORS,
525 gimple_bb (phi), gimple_bb (arg1_def_stmt)))
526 return NULL;
527
528 /* Use the RHS as new_arg1. */
529 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
530 if (convert_code == VIEW_CONVERT_EXPR)
531 new_arg1 = TREE_OPERAND (new_arg1, 0);
532 if (TREE_CODE (new_arg1) == SSA_NAME
533 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg1))
534 return NULL;
535 }
536 else
537 {
538 /* arg0_def_stmt should be conditional. */
539 if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt)))
540 return NULL;
541 /* If arg1 is an INTEGER_CST, fold it to new type. */
542 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
543 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
544 {
545 if (gimple_assign_cast_p (arg0_def_stmt))
546 {
547 /* For the INTEGER_CST case, we are just moving the
548 conversion from one place to another, which can often
549 hurt as the conversion moves further away from the
550 statement that computes the value. So, perform this
551 only if new_arg0 is an operand of COND_STMT, or
552 if arg0_def_stmt is the only non-debug stmt in
553 its basic block, because then it is possible this
554 could enable further optimizations (minmax replacement
555 etc.). See PR71016. */
556 if (new_arg0 != gimple_cond_lhs (cond_stmt)
557 && new_arg0 != gimple_cond_rhs (cond_stmt)
558 && gimple_bb (arg0_def_stmt) == e0->src)
559 {
560 gsi = gsi_for_stmt (arg0_def_stmt);
561 gsi_prev_nondebug (&gsi);
562 if (!gsi_end_p (gsi))
563 {
564 if (gassign *assign
565 = dyn_cast <gassign *> (gsi_stmt (gsi)))
566 {
567 tree lhs = gimple_assign_lhs (assign);
568 enum tree_code ass_code
569 = gimple_assign_rhs_code (assign);
570 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
571 return NULL;
572 if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
573 return NULL;
574 gsi_prev_nondebug (&gsi);
575 if (!gsi_end_p (gsi))
576 return NULL;
577 }
578 else
579 return NULL;
580 }
581 gsi = gsi_for_stmt (arg0_def_stmt);
582 gsi_next_nondebug (&gsi);
583 if (!gsi_end_p (gsi))
584 return NULL;
585 }
586 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
587 }
588 else
589 return NULL;
590 }
591 else
592 return NULL;
593 }
594
595 /* If arg0/arg1 have > 1 use, then this transformation actually increases
596 the number of expressions evaluated at runtime. */
597 if (!has_single_use (arg0)
598 || (arg1_def_stmt && !has_single_use (arg1)))
599 return NULL;
600
601 /* If the types of new_arg0 and new_arg1 are different, bail out. */
602 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
603 return NULL;
604
605 /* Create a new PHI stmt. */
606 result = PHI_RESULT (phi);
607 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
608 newphi = create_phi_node (temp, gimple_bb (phi));
609
610 if (dump_file && (dump_flags & TDF_DETAILS))
611 {
612 fprintf (dump_file, "PHI ");
613 print_generic_expr (dump_file, gimple_phi_result (phi));
614 fprintf (dump_file,
615 " changed to factor conversion out from COND_EXPR.\n");
616 fprintf (dump_file, "New stmt with CAST that defines ");
617 print_generic_expr (dump_file, result);
618 fprintf (dump_file, ".\n");
619 }
620
621 /* Remove the old cast(s), each of which has a single use. */
622 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
623 gsi_remove (&gsi_for_def, true);
624 release_defs (arg0_def_stmt);
625
626 if (arg1_def_stmt)
627 {
628 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
629 gsi_remove (&gsi_for_def, true);
630 release_defs (arg1_def_stmt);
631 }
632
633 add_phi_arg (newphi, new_arg0, e0, locus);
634 add_phi_arg (newphi, new_arg1, e1, locus);
635
636 /* Create the conversion stmt and insert it. */
637 if (convert_code == VIEW_CONVERT_EXPR)
638 {
639 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
640 new_stmt = gimple_build_assign (result, temp);
641 }
642 else
643 new_stmt = gimple_build_assign (result, convert_code, temp);
644 gsi = gsi_after_labels (gimple_bb (phi));
645 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
646
647 /* Remove the original PHI stmt. */
648 gsi = gsi_for_stmt (phi);
649 gsi_remove (&gsi, true);
650
651 statistics_counter_event (cfun, "factored out cast", 1);
652
653 return newphi;
654 }
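
/* Illustrative sketch, not part of the pass: what
   factor_out_conditional_conversion does at the source level.  The
   function name is hypothetical.  Both PHI arguments are the same kind of
   cast, so the cast is factored out and applied once to the PHI result,
   exposing the bare values to the other replacement routines
   (e.g. minmax_replacement).  */

static inline int
phiopt_example_factor_cast (short a, short b)
{
  /* Before: r = PHI <(int) a_1, (int) b_2>.
     After:  t = PHI <a_1, b_2>;  r = (int) t;  which can then become
     (int) MIN_EXPR <a_1, b_2>.  */
  return (a < b) ? (int) a : (int) b;
}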
655
656 /* Optimize
657 # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
658 if (x_5 op cstN) # where op is == or != and N is 1 or 2
659 goto bb3;
660 else
661 goto bb4;
662 bb3:
663 bb4:
664 # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1
665
666 to r_6 = x_5 + (min (cst3, cst4) - cst1) or
667 r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
668 of cst3 and cst4 is smaller. */
669
670 static bool
671 two_value_replacement (basic_block cond_bb, basic_block middle_bb,
672 edge e1, gphi *phi, tree arg0, tree arg1)
673 {
674 /* Only look for adjacent integer constants. */
675 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
676 || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
677 || TREE_CODE (arg0) != INTEGER_CST
678 || TREE_CODE (arg1) != INTEGER_CST
679 || (tree_int_cst_lt (arg0, arg1)
680 ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
681 : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
682 return false;
683
684 if (!empty_block_p (middle_bb))
685 return false;
686
687 gimple *stmt = last_stmt (cond_bb);
688 tree lhs = gimple_cond_lhs (stmt);
689 tree rhs = gimple_cond_rhs (stmt);
690
691 if (TREE_CODE (lhs) != SSA_NAME
692 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
693 || TREE_CODE (rhs) != INTEGER_CST)
694 return false;
695
696 switch (gimple_cond_code (stmt))
697 {
698 case EQ_EXPR:
699 case NE_EXPR:
700 break;
701 default:
702 return false;
703 }
704
705 /* Defer boolean x ? 0 : {1,-1} or x ? {1,-1} : 0 to
706 match_simplify_replacement. */
707 if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
708 && (integer_zerop (arg0)
709 || integer_zerop (arg1)
710 || TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
711 || (TYPE_PRECISION (TREE_TYPE (arg0))
712 <= TYPE_PRECISION (TREE_TYPE (lhs)))))
713 return false;
714
715 wide_int min, max;
716 value_range r;
717 get_range_query (cfun)->range_of_expr (r, lhs);
718
719 if (r.kind () == VR_RANGE)
720 {
721 min = r.lower_bound ();
722 max = r.upper_bound ();
723 }
724 else
725 {
726 int prec = TYPE_PRECISION (TREE_TYPE (lhs));
727 signop sgn = TYPE_SIGN (TREE_TYPE (lhs));
728 min = wi::min_value (prec, sgn);
729 max = wi::max_value (prec, sgn);
730 }
731 if (min + 1 != max
732 || (wi::to_wide (rhs) != min
733 && wi::to_wide (rhs) != max))
734 return false;
735
736 /* We need to know which is the true edge and which is the false
737 edge so that we know when to invert the condition below. */
738 edge true_edge, false_edge;
739 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
740 if ((gimple_cond_code (stmt) == EQ_EXPR)
741 ^ (wi::to_wide (rhs) == max)
742 ^ (e1 == false_edge))
743 std::swap (arg0, arg1);
744
745 tree type;
746 if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
747 {
748 /* Avoid performing the arithmetic in bool type, which has different
749 semantics; otherwise prefer the unsigned type of the two with
750 the same precision. */
751 if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
752 || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
753 type = TREE_TYPE (lhs);
754 else
755 type = TREE_TYPE (arg0);
756 }
757 else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
758 type = TREE_TYPE (lhs);
759 else
760 type = TREE_TYPE (arg0);
761
762 min = wide_int::from (min, TYPE_PRECISION (type),
763 TYPE_SIGN (TREE_TYPE (lhs)));
764 wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
765 TYPE_SIGN (TREE_TYPE (arg0)));
766 enum tree_code code;
767 wi::overflow_type ovf;
768 if (tree_int_cst_lt (arg0, arg1))
769 {
770 code = PLUS_EXPR;
771 a -= min;
772 if (!TYPE_UNSIGNED (type))
773 {
774 /* lhs is known to be in range [min, min+1] and we want to add a
775 to it. Check if that operation can overflow for those 2 values
776 and if yes, force unsigned type. */
777 wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
778 if (ovf)
779 type = unsigned_type_for (type);
780 }
781 }
782 else
783 {
784 code = MINUS_EXPR;
785 a += min;
786 if (!TYPE_UNSIGNED (type))
787 {
788 /* lhs is known to be in range [min, min+1] and we want to subtract
789 it from a. Check if that operation can overflow for those 2
790 values and if yes, force unsigned type. */
791 wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
792 if (ovf)
793 type = unsigned_type_for (type);
794 }
795 }
796
797 tree arg = wide_int_to_tree (type, a);
798 gimple_seq stmts = NULL;
799 lhs = gimple_convert (&stmts, type, lhs);
800 tree new_rhs;
801 if (code == PLUS_EXPR)
802 new_rhs = gimple_build (&stmts, PLUS_EXPR, type, lhs, arg);
803 else
804 new_rhs = gimple_build (&stmts, MINUS_EXPR, type, arg, lhs);
805 new_rhs = gimple_convert (&stmts, TREE_TYPE (arg0), new_rhs);
806 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
807 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
808
809 replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);
810
811 /* Note that we optimized this PHI. */
812 return true;
813 }
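
/* Illustrative sketch, not part of the pass, of the two_value_replacement
   transform above.  The function name is hypothetical: X is known to take
   one of two adjacent values and the PHI selects between two adjacent
   constants, so the select collapses to linear arithmetic on X.  */

static inline int
phiopt_example_two_value (int x)
{
  /* Assume range info proves x is 0 or 1.
     Before: r = PHI <3, 4> selected by x == 0.
     After:  r = x + 3.  */
  return (x == 0) ? 3 : 4;
}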
814
815 /* Return TRUE if CODE should be allowed during early phiopt.
816 Currently this is to allow MIN/MAX and ABS/NEGATE. */
817 static bool
818 phiopt_early_allow (enum tree_code code)
819 {
820 switch (code)
821 {
822 case MIN_EXPR:
823 case MAX_EXPR:
824 case ABS_EXPR:
825 case ABSU_EXPR:
826 case NEGATE_EXPR:
827 case SSA_NAME:
828 return true;
829 default:
830 return false;
831 }
832 }
833
834 /* gimple_simplify_phiopt is like gimple_simplify but designed for PHIOPT.
835 Return NULL if nothing can be simplified; otherwise return the simplified
836 value, with any helper statements pushed to SEQ. Also rejects tree codes
837 that are not allowed if EARLY_P is set.
838 Takes the comparison from COMP_STMT and two args, ARG0 and ARG1, and tries
839 to simplify CMP ? ARG0 : ARG1.
840 Also tries (!CMP) ? ARG1 : ARG0 if the non-inverted form failed. */
841 static tree
842 gimple_simplify_phiopt (bool early_p, tree type, gimple *comp_stmt,
843 tree arg0, tree arg1,
844 gimple_seq *seq)
845 {
846 tree result;
847 enum tree_code comp_code = gimple_cond_code (comp_stmt);
848 location_t loc = gimple_location (comp_stmt);
849 tree cmp0 = gimple_cond_lhs (comp_stmt);
850 tree cmp1 = gimple_cond_rhs (comp_stmt);
851 /* To handle special cases like floating point comparison, it is easier and
852 less error-prone to build a tree and gimplify it on the fly though it is
853 less efficient.
854 Don't use fold_build2 here as that might create (bool)a instead of just
855 "a != 0". */
856 tree cond = build2_loc (loc, comp_code, boolean_type_node,
857 cmp0, cmp1);
858 gimple_match_op op (gimple_match_cond::UNCOND,
859 COND_EXPR, type, cond, arg0, arg1);
860
861 if (op.resimplify (early_p ? NULL : seq, follow_all_ssa_edges))
862 {
863 /* During early phiopt, only allow some generated tree codes. */
864 if (!early_p
865 || op.code.is_tree_code ()
866 || phiopt_early_allow ((tree_code)op.code))
867 {
868 result = maybe_push_res_to_seq (&op, seq);
869 if (result)
870 return result;
871 }
872 }
873 /* Try the inverted comparison, that is !COMP ? ARG1 : ARG0. */
874 comp_code = invert_tree_comparison (comp_code, HONOR_NANS (cmp0));
875
876 if (comp_code == ERROR_MARK)
877 return NULL;
878
879 cond = build2_loc (loc,
880 comp_code, boolean_type_node,
881 cmp0, cmp1);
882 gimple_match_op op1 (gimple_match_cond::UNCOND,
883 COND_EXPR, type, cond, arg1, arg0);
884
885 if (op1.resimplify (early_p ? NULL : seq, follow_all_ssa_edges))
886 {
887 /* During early phiopt, only allow some generated tree codes. */
888 if (!early_p
889 || op1.code.is_tree_code ()
890 || phiopt_early_allow ((tree_code)op1.code))
891 {
892 result = maybe_push_res_to_seq (&op1, seq);
893 if (result)
894 return result;
895 }
896 }
897
898 return NULL;
899 }
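
/* Illustrative sketch, not part of the pass, of why the inverted retry
   above helps: if match-and-simplify does not recognize CMP ? ARG0 : ARG1
   directly, it may still recognize the equivalent !CMP ? ARG1 : ARG0.
   The function name is hypothetical.  */

static inline int
phiopt_example_inverted (int x, int y)
{
  /* x >= y ? y : x computes the same value as x < y ? x : y; if only the
     latter spelling matches as MIN_EXPR <x, y>, trying the inverted
     comparison still catches this one.  */
  return (x >= y) ? y : x;
}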
900
901 /* The function match_simplify_replacement does the main work of doing the
902 replacement using match and simplify. Return true if the replacement is done.
903 Otherwise return false.
904 BB is the basic block where the replacement is going to be done. ARG0
905 is argument 0 from PHI. Likewise for ARG1. */
906
907 static bool
908 match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
909 edge e0, edge e1, gphi *phi,
910 tree arg0, tree arg1, bool early_p)
911 {
912 gimple *stmt;
913 gimple_stmt_iterator gsi;
914 edge true_edge, false_edge;
915 gimple_seq seq = NULL;
916 tree result;
917 gimple *stmt_to_move = NULL;
918
919 /* Special case A ? B : B as this will always simplify to B. */
920 if (operand_equal_for_phi_arg_p (arg0, arg1))
921 return false;
922
923 /* If the basic block only has a cheap preparation statement,
924 allow it and move it once the transformation is done. */
925 if (!empty_block_p (middle_bb))
926 {
927 stmt_to_move = last_and_only_stmt (middle_bb);
928 if (!stmt_to_move)
929 return false;
930
931 if (gimple_vuse (stmt_to_move))
932 return false;
933
934 if (gimple_could_trap_p (stmt_to_move)
935 || gimple_has_side_effects (stmt_to_move))
936 return false;
937
938 if (gimple_uses_undefined_value_p (stmt_to_move))
939 return false;
940
941 /* Allow only assignments, no calls.
942 As const calls don't match any of the above, yet they could
943 still have some side-effects - they could contain
944 gimple_could_trap_p statements, like floating point
945 exceptions or integer division by zero. See PR70586.
946 FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
947 should handle this. */
948 if (!is_gimple_assign (stmt_to_move))
949 return false;
950
951 tree lhs = gimple_assign_lhs (stmt_to_move);
952 gimple *use_stmt;
953 use_operand_p use_p;
954
955 /* Allow only a statement which feeds into the phi. */
956 if (!lhs || TREE_CODE (lhs) != SSA_NAME
957 || !single_imm_use (lhs, &use_p, &use_stmt)
958 || use_stmt != phi)
959 return false;
960 }
961
962 /* At this point we know we have a GIMPLE_COND with two successors.
963 One successor is BB, the other successor is an empty block which
964 falls through into BB.
965
966 There is a single PHI node at the join point (BB).
967
968 So, given the condition COND, and the two PHI arguments, match and simplify
969 can happen on (COND) ? arg0 : arg1. */
970
971 stmt = last_stmt (cond_bb);
972
973 /* We need to know which is the true edge and which is the false
974 edge so that we know when to invert the condition below. */
975 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
976 if (e1 == true_edge || e0 == false_edge)
977 std::swap (arg0, arg1);
978
979 tree type = TREE_TYPE (gimple_phi_result (phi));
980 result = gimple_simplify_phiopt (early_p, type, stmt,
981 arg0, arg1,
982 &seq);
983 if (!result)
984 return false;
985
986 gsi = gsi_last_bb (cond_bb);
987 if (stmt_to_move)
988 {
989 if (dump_file && (dump_flags & TDF_DETAILS))
990 {
991 fprintf (dump_file, "statement un-sinked:\n");
992 print_gimple_stmt (dump_file, stmt_to_move, 0,
993 TDF_VOPS|TDF_MEMSYMS);
994 }
995 gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt_to_move);
996 gsi_move_before (&gsi1, &gsi);
997 reset_flow_sensitive_info (gimple_assign_lhs (stmt_to_move));
998 }
999 if (seq)
1000 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
1001
1002 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1003
1004 /* Add the statistic here even though replace_phi_edge_with_variable
1005 already does it, as we want to count how often match-simplify happens
1006 vs. the other replacements. */
1007 statistics_counter_event (cfun, "match-simplify PHI replacement", 1);
1008
1009 /* Note that we optimized this PHI. */
1010 return true;
1011 }
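
/* Illustrative sketch, not part of the pass, of the single cheap
   "preparation statement" tolerated in the middle block above.  The
   function name is hypothetical.  */

static inline int
phiopt_example_prep_stmt (int x)
{
  /* The middle block holds the single statement t = -x feeding the PHI;
     once (x > 0) ? -x : x is recognized (negated abs), the statement is
     moved in front of the removed condition.  */
  int t = -x;
  return (x > 0) ? t : x;
}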
1012
1013 /* Update *ARG which is defined in STMT so that it contains the
1014 computed value if that seems profitable. Return true if the
1015 statement is made dead by that rewriting. */
1016
1017 static bool
1018 jump_function_from_stmt (tree *arg, gimple *stmt)
1019 {
1020 enum tree_code code = gimple_assign_rhs_code (stmt);
1021 if (code == ADDR_EXPR)
1022 {
1023 /* For arg = &p->i transform it to p, if possible. */
1024 tree rhs1 = gimple_assign_rhs1 (stmt);
1025 poly_int64 offset;
1026 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
1027 &offset);
1028 if (tem
1029 && TREE_CODE (tem) == MEM_REF
1030 && known_eq (mem_ref_offset (tem) + offset, 0))
1031 {
1032 *arg = TREE_OPERAND (tem, 0);
1033 return true;
1034 }
1035 }
1036 /* TODO: Much like IPA-CP jump-functions we want to handle constant
1037 additions symbolically here, and we'd need to update the comparison
1038 code that compares the arg + cst tuples in our caller. For now the
1039 code above exactly handles the VEC_BASE pattern from vec.h. */
1040 return false;
1041 }
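
/* Illustrative sketch, not part of the pass, of the ADDR_EXPR case above.
   The type and function names are hypothetical: arg = &p->base computes an
   address at offset 0 from p, so the PHI argument can be rewritten to p
   itself and the defining statement becomes dead.  */

struct phiopt_example_wrapper { int base; };

static inline int *
phiopt_example_addr (struct phiopt_example_wrapper *p)
{
  /* &p->base is at offset 0, the shape of the VEC_BASE pattern from
     vec.h mentioned above.  */
  return &p->base;
}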
1042
1043 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
1044 of the form SSA_NAME NE 0.
1045
1046 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
1047 the two input values of the EQ_EXPR match arg0 and arg1.
1048
1049 If so update *code and return TRUE. Otherwise return FALSE. */
1050
1051 static bool
1052 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
1053 enum tree_code *code, const_tree rhs)
1054 {
1055 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
1056 statement. */
1057 if (TREE_CODE (rhs) == SSA_NAME)
1058 {
1059 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
1060
1061 /* Verify the defining statement has an EQ_EXPR on the RHS. */
1062 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
1063 {
1064 /* Finally verify the source operands of the EQ_EXPR are equal
1065 to arg0 and arg1. */
1066 tree op0 = gimple_assign_rhs1 (def1);
1067 tree op1 = gimple_assign_rhs2 (def1);
1068 if ((operand_equal_for_phi_arg_p (arg0, op0)
1069 && operand_equal_for_phi_arg_p (arg1, op1))
1070 || (operand_equal_for_phi_arg_p (arg0, op1)
1071 && operand_equal_for_phi_arg_p (arg1, op0)))
1072 {
1073 /* We will perform the optimization. */
1074 *code = gimple_assign_rhs_code (def1);
1075 return true;
1076 }
1077 }
1078 }
1079 return false;
1080 }
1081
1082 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
1083
1084 Also return TRUE if arg0/arg1 are equal to the source arguments of
1085 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
1086
1087 Return FALSE otherwise. */
1088
1089 static bool
1090 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
1091 enum tree_code *code, gimple *cond)
1092 {
1093 gimple *def;
1094 tree lhs = gimple_cond_lhs (cond);
1095 tree rhs = gimple_cond_rhs (cond);
1096
1097 if ((operand_equal_for_phi_arg_p (arg0, lhs)
1098 && operand_equal_for_phi_arg_p (arg1, rhs))
1099 || (operand_equal_for_phi_arg_p (arg1, lhs)
1100 && operand_equal_for_phi_arg_p (arg0, rhs)))
1101 return true;
1102
1103 /* Now handle more complex case where we have an EQ comparison
1104 which feeds a BIT_AND_EXPR which feeds COND.
1105
1106 First verify that COND is of the form SSA_NAME NE 0. */
1107 if (*code != NE_EXPR || !integer_zerop (rhs)
1108 || TREE_CODE (lhs) != SSA_NAME)
1109 return false;
1110
1111 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
1112 def = SSA_NAME_DEF_STMT (lhs);
1113 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
1114 return false;
1115
1116 /* Now verify arg0/arg1 correspond to the source arguments of an
1117 EQ comparison feeding the BIT_AND_EXPR. */
1118
1119 tree tmp = gimple_assign_rhs1 (def);
1120 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1121 return true;
1122
1123 tmp = gimple_assign_rhs2 (def);
1124 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1125 return true;
1126
1127 return false;
1128 }
1129
1130 /* Returns true if ARG is a neutral element for operation CODE
1131 on the RIGHT side. */
1132
1133 static bool
1134 neutral_element_p (tree_code code, tree arg, bool right)
1135 {
1136 switch (code)
1137 {
1138 case PLUS_EXPR:
1139 case BIT_IOR_EXPR:
1140 case BIT_XOR_EXPR:
1141 return integer_zerop (arg);
1142
1143 case LROTATE_EXPR:
1144 case RROTATE_EXPR:
1145 case LSHIFT_EXPR:
1146 case RSHIFT_EXPR:
1147 case MINUS_EXPR:
1148 case POINTER_PLUS_EXPR:
1149 return right && integer_zerop (arg);
1150
1151 case MULT_EXPR:
1152 return integer_onep (arg);
1153
1154 case TRUNC_DIV_EXPR:
1155 case CEIL_DIV_EXPR:
1156 case FLOOR_DIV_EXPR:
1157 case ROUND_DIV_EXPR:
1158 case EXACT_DIV_EXPR:
1159 return right && integer_onep (arg);
1160
1161 case BIT_AND_EXPR:
1162 return integer_all_onesp (arg);
1163
1164 default:
1165 return false;
1166 }
1167 }
1168
1169 /* Returns true if ARG is an absorbing element for operation CODE. */
1170
1171 static bool
1172 absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
1173 {
1174 switch (code)
1175 {
1176 case BIT_IOR_EXPR:
1177 return integer_all_onesp (arg);
1178
1179 case MULT_EXPR:
1180 case BIT_AND_EXPR:
1181 return integer_zerop (arg);
1182
1183 case LSHIFT_EXPR:
1184 case RSHIFT_EXPR:
1185 case LROTATE_EXPR:
1186 case RROTATE_EXPR:
1187 return !right && integer_zerop (arg);
1188
1189 case TRUNC_DIV_EXPR:
1190 case CEIL_DIV_EXPR:
1191 case FLOOR_DIV_EXPR:
1192 case ROUND_DIV_EXPR:
1193 case EXACT_DIV_EXPR:
1194 case TRUNC_MOD_EXPR:
1195 case CEIL_MOD_EXPR:
1196 case FLOOR_MOD_EXPR:
1197 case ROUND_MOD_EXPR:
1198 return (!right
1199 && integer_zerop (arg)
1200 && tree_single_nonzero_warnv_p (rval, NULL));
1201
1202 default:
1203 return false;
1204 }
1205 }
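
/* Illustrative sketch, not part of the pass, of the difference between
   the two predicates above: a neutral element leaves the other operand
   unchanged, while an absorbing element forces the result.  The function
   name is hypothetical.  */

static inline void
phiopt_example_elements (int x)
{
  int n1 = x + 0;   /* 0 is neutral for PLUS_EXPR: n1 == x.  */
  int n2 = x * 1;   /* 1 is neutral for MULT_EXPR: n2 == x.  */
  int a1 = x * 0;   /* 0 is absorbing for MULT_EXPR: a1 == 0.  */
  int a2 = x & 0;   /* 0 is absorbing for BIT_AND_EXPR: a2 == 0.  */
  (void) n1; (void) n2; (void) a1; (void) a2;
}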
1206
1207 /* The function value_replacement does the main work of doing the value
1208 replacement. Return non-zero if the replacement is done. Otherwise return
1209 0. If we remove the middle basic block, return 2.
1210 BB is the basic block where the replacement is going to be done. ARG0
1211 is argument 0 from the PHI. Likewise for ARG1. */
1212
1213 static int
1214 value_replacement (basic_block cond_bb, basic_block middle_bb,
1215 edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
1216 {
1217 gimple_stmt_iterator gsi;
1218 gimple *cond;
1219 edge true_edge, false_edge;
1220 enum tree_code code;
1221 bool empty_or_with_defined_p = true;
1222
1223 /* If the type says honor signed zeros we cannot do this
1224 optimization. */
1225 if (HONOR_SIGNED_ZEROS (arg1))
1226 return 0;
1227
1228 /* If there is a statement in MIDDLE_BB that defines one of the PHI
1229 arguments, then adjust arg0 or arg1. */
1230 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1231 while (!gsi_end_p (gsi))
1232 {
1233 gimple *stmt = gsi_stmt (gsi);
1234 tree lhs;
1235 gsi_next_nondebug (&gsi);
1236 if (!is_gimple_assign (stmt))
1237 {
1238 if (gimple_code (stmt) != GIMPLE_PREDICT
1239 && gimple_code (stmt) != GIMPLE_NOP)
1240 empty_or_with_defined_p = false;
1241 continue;
1242 }
1243 /* Now try to adjust arg0 or arg1 according to the computation
1244 in the statement. */
1245 lhs = gimple_assign_lhs (stmt);
1246 if (!(lhs == arg0
1247 && jump_function_from_stmt (&arg0, stmt))
1248 || (lhs == arg1
1249 && jump_function_from_stmt (&arg1, stmt)))
1250 empty_or_with_defined_p = false;
1251 }
1252
1253 cond = last_stmt (cond_bb);
1254 code = gimple_cond_code (cond);
1255
1256 /* This transformation is only valid for equality comparisons. */
1257 if (code != NE_EXPR && code != EQ_EXPR)
1258 return 0;
1259
1260 /* We need to know which is the true edge and which is the false
1261 edge so that we know which PHI argument pairs with which result. */
1262 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1263
1264 /* At this point we know we have a COND_EXPR with two successors.
1265 One successor is BB, the other successor is an empty block which
1266 falls through into BB.
1267
1268 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
1269
1270 There is a single PHI node at the join point (BB) with two arguments.
1271
1272 We now need to verify that the two arguments in the PHI node match
1273 the two arguments to the equality comparison. */
1274
1275 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
1276 {
1277 edge e;
1278 tree arg;
1279
1280 /* For NE_EXPR, we want to build an assignment result = arg where
1281 arg is the PHI argument associated with the true edge. For
1282 EQ_EXPR we want the PHI argument associated with the false edge. */
1283 e = (code == NE_EXPR ? true_edge : false_edge);
1284
1285 /* Unfortunately, E may not reach BB (it may instead have gone to
1286 OTHER_BLOCK). If that is the case, then we want the single outgoing
1287 edge from OTHER_BLOCK which reaches BB and represents the desired
1288 path from COND_BLOCK. */
1289 if (e->dest == middle_bb)
1290 e = single_succ_edge (e->dest);
1291
1292 /* Now we know the incoming edge to BB that has the argument for the
1293 RHS of our new assignment statement. */
1294 if (e0 == e)
1295 arg = arg0;
1296 else
1297 arg = arg1;
1298
1299 /* If the middle basic block was empty or only defines the
1300 PHI arguments, and this is a single PHI whose args differ
1301 for the edges e0 and e1, then we can remove the middle basic block. */
1302 if (empty_or_with_defined_p
1303 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
1304 e0, e1) == phi)
1305 {
1306 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
1307 /* Note that we optimized this PHI. */
1308 return 2;
1309 }
1310 else
1311 {
1312 statistics_counter_event (cfun, "Replace PHI with variable/value_replacement", 1);
1313
1314 /* Replace the PHI arguments with arg. */
1315 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
1316 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
1317 if (dump_file && (dump_flags & TDF_DETAILS))
1318 {
1319 fprintf (dump_file, "PHI ");
1320 print_generic_expr (dump_file, gimple_phi_result (phi));
1321 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
1322 cond_bb->index);
1323 print_generic_expr (dump_file, arg);
1324 fprintf (dump_file, ".\n");
1325 }
1326 return 1;
1327 }
1328
1329 }
1330
1331 /* Now optimize (x != 0) ? x + y : y to just x + y. */
1332 gsi = gsi_last_nondebug_bb (middle_bb);
1333 if (gsi_end_p (gsi))
1334 return 0;
1335
1336 gimple *assign = gsi_stmt (gsi);
1337 if (!is_gimple_assign (assign)
1338 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
1339 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
1340 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
1341 return 0;
1342
1343 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
1344 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1345 return 0;
1346
1347 /* Allow up to 2 cheap preparation statements that prepare the argument
1348 for ASSIGN, e.g.:
1349 if (y_4 != 0)
1350 goto <bb 3>;
1351 else
1352 goto <bb 4>;
1353 <bb 3>:
1354 _1 = (int) y_4;
1355 iftmp.0_6 = x_5(D) r<< _1;
1356 <bb 4>:
1357 # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
1358 or:
1359 if (y_3(D) == 0)
1360 goto <bb 4>;
1361 else
1362 goto <bb 3>;
1363 <bb 3>:
1364 y_4 = y_3(D) & 31;
1365 _1 = (int) y_4;
1366 _6 = x_5(D) r<< _1;
1367 <bb 4>:
1368 # _2 = PHI <x_5(D)(2), _6(3)> */
1369 gimple *prep_stmt[2] = { NULL, NULL };
1370 int prep_cnt;
1371 for (prep_cnt = 0; ; prep_cnt++)
1372 {
1373 gsi_prev_nondebug (&gsi);
1374 if (gsi_end_p (gsi))
1375 break;
1376
1377 gimple *g = gsi_stmt (gsi);
1378 if (gimple_code (g) == GIMPLE_LABEL)
1379 break;
1380
1381 if (prep_cnt == 2 || !is_gimple_assign (g))
1382 return 0;
1383
1384 tree lhs = gimple_assign_lhs (g);
1385 tree rhs1 = gimple_assign_rhs1 (g);
1386 use_operand_p use_p;
1387 gimple *use_stmt;
1388 if (TREE_CODE (lhs) != SSA_NAME
1389 || TREE_CODE (rhs1) != SSA_NAME
1390 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1391 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1392 || !single_imm_use (lhs, &use_p, &use_stmt)
1393 || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
1394 return 0;
1395 switch (gimple_assign_rhs_code (g))
1396 {
1397 CASE_CONVERT:
1398 break;
1399 case PLUS_EXPR:
1400 case BIT_AND_EXPR:
1401 case BIT_IOR_EXPR:
1402 case BIT_XOR_EXPR:
1403 if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
1404 return 0;
1405 break;
1406 default:
1407 return 0;
1408 }
1409 prep_stmt[prep_cnt] = g;
1410 }
1411
1412 /* Only transform if it removes the condition. */
1413 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
1414 return 0;
1415
1416 /* Size-wise, this is always profitable. */
1417 if (optimize_bb_for_speed_p (cond_bb)
1418 /* The special case is useless if it has a low probability. */
1419 && profile_status_for_fn (cfun) != PROFILE_ABSENT
1420 && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
1421 /* If assign is cheap, there is no point avoiding it. */
1422 && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
1423 >= 3 * estimate_num_insns (cond, &eni_time_weights))
1424 return 0;
1425
1426 tree lhs = gimple_assign_lhs (assign);
1427 tree rhs1 = gimple_assign_rhs1 (assign);
1428 tree rhs2 = gimple_assign_rhs2 (assign);
1429 enum tree_code code_def = gimple_assign_rhs_code (assign);
1430 tree cond_lhs = gimple_cond_lhs (cond);
1431 tree cond_rhs = gimple_cond_rhs (cond);
1432
1433 /* Propagate the cond_rhs constant through preparation stmts,
1434 make sure UB isn't invoked while doing that. */
1435 for (int i = prep_cnt - 1; i >= 0; --i)
1436 {
1437 gimple *g = prep_stmt[i];
1438 tree grhs1 = gimple_assign_rhs1 (g);
1439 if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
1440 return 0;
1441 cond_lhs = gimple_assign_lhs (g);
1442 cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
1443 if (TREE_CODE (cond_rhs) != INTEGER_CST
1444 || TREE_OVERFLOW (cond_rhs))
1445 return 0;
1446 if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
1447 {
1448 cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
1449 gimple_assign_rhs2 (g));
1450 if (TREE_OVERFLOW (cond_rhs))
1451 return 0;
1452 }
1453 cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
1454 if (TREE_CODE (cond_rhs) != INTEGER_CST
1455 || TREE_OVERFLOW (cond_rhs))
1456 return 0;
1457 }
1458
1459 if (((code == NE_EXPR && e1 == false_edge)
1460 || (code == EQ_EXPR && e1 == true_edge))
1461 && arg0 == lhs
1462 && ((arg1 == rhs1
1463 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1464 && neutral_element_p (code_def, cond_rhs, true))
1465 || (arg1 == rhs2
1466 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1467 && neutral_element_p (code_def, cond_rhs, false))
1468 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
1469 && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1470 && absorbing_element_p (code_def, cond_rhs, true, rhs2))
1471 || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1472 && absorbing_element_p (code_def,
1473 cond_rhs, false, rhs2))))))
1474 {
1475 gsi = gsi_for_stmt (cond);
1476 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1477 def-stmt in:
1478 if (n_5 != 0)
1479 goto <bb 3>;
1480 else
1481 goto <bb 4>;
1482
1483 <bb 3>:
1484 # RANGE [0, 4294967294]
1485 u_6 = n_5 + 4294967295;
1486
1487 <bb 4>:
1488 # u_3 = PHI <u_6(3), 4294967295(2)> */
1489 reset_flow_sensitive_info (lhs);
1490 gimple_stmt_iterator gsi_from;
1491 for (int i = prep_cnt - 1; i >= 0; --i)
1492 {
1493 tree plhs = gimple_assign_lhs (prep_stmt[i]);
1494 reset_flow_sensitive_info (plhs);
1495 gsi_from = gsi_for_stmt (prep_stmt[i]);
1496 gsi_move_before (&gsi_from, &gsi);
1497 }
1498 gsi_from = gsi_for_stmt (assign);
1499 gsi_move_before (&gsi_from, &gsi);
1500 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1501 return 2;
1502 }
1503
1504 return 0;
1505 }
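
/* Illustrative sketch, not part of the pass, of the second
   value_replacement form handled above.  The function name is
   hypothetical.  */

static inline int
phiopt_example_value_repl (int x, int y)
{
  /* Before: r = PHI <x + y, y> guarded by x != 0.  Since 0 is the
     neutral element of PLUS_EXPR (neutral_element_p), both arms compute
     the same value and the branch is removed, leaving r = x + y.  */
  return (x != 0) ? x + y : y;
}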
1506
1507 /* The function minmax_replacement does the main work of doing the minmax
1508 replacement. Return true if the replacement is done. Otherwise return
1509 false.
1510 BB is the basic block where the replacement is going to be done. ARG0
1511 is argument 0 from the PHI. Likewise for ARG1. */
1512
1513 static bool
1514 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1515 edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
1516 {
1517 tree result;
1518 edge true_edge, false_edge;
1519 enum tree_code minmax, ass_code;
1520 tree smaller, larger, arg_true, arg_false;
1521 gimple_stmt_iterator gsi, gsi_from;
1522
1523 tree type = TREE_TYPE (PHI_RESULT (phi));
1524
1525 /* The optimization may be unsafe due to NaNs. */
1526 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1527 return false;
1528
1529 gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
1530 enum tree_code cmp = gimple_cond_code (cond);
1531 tree rhs = gimple_cond_rhs (cond);
1532
1533 /* Turn EQ/NE of extreme values to order comparisons. */
1534 if ((cmp == NE_EXPR || cmp == EQ_EXPR)
1535 && TREE_CODE (rhs) == INTEGER_CST
1536 && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
1537 {
1538 if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
1539 {
1540 cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
1541 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1542 wi::min_value (TREE_TYPE (rhs)) + 1);
1543 }
1544 else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
1545 {
1546 cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
1547 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1548 wi::max_value (TREE_TYPE (rhs)) - 1);
1549 }
1550 }
1551
1552 /* This transformation is only valid for order comparisons. Record which
1553 operand is smaller/larger if the result of the comparison is true. */
1554 tree alt_smaller = NULL_TREE;
1555 tree alt_larger = NULL_TREE;
1556 if (cmp == LT_EXPR || cmp == LE_EXPR)
1557 {
1558 smaller = gimple_cond_lhs (cond);
1559 larger = rhs;
1560 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1561 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1562 if (TREE_CODE (larger) == INTEGER_CST
1563 && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
1564 {
1565 if (cmp == LT_EXPR)
1566 {
1567 wi::overflow_type overflow;
1568 wide_int alt = wi::sub (wi::to_wide (larger), 1,
1569 TYPE_SIGN (TREE_TYPE (larger)),
1570 &overflow);
1571 if (! overflow)
1572 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1573 }
1574 else
1575 {
1576 wi::overflow_type overflow;
1577 wide_int alt = wi::add (wi::to_wide (larger), 1,
1578 TYPE_SIGN (TREE_TYPE (larger)),
1579 &overflow);
1580 if (! overflow)
1581 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1582 }
1583 }
1584 }
1585 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1586 {
1587 smaller = rhs;
1588 larger = gimple_cond_lhs (cond);
1589 /* If we have larger > CST it is equivalent to larger >= CST+1.
1590 Likewise larger >= CST is equivalent to larger > CST-1. */
1591 if (TREE_CODE (smaller) == INTEGER_CST
1592 && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
1593 {
1594 wi::overflow_type overflow;
1595 if (cmp == GT_EXPR)
1596 {
1597 wide_int alt = wi::add (wi::to_wide (smaller), 1,
1598 TYPE_SIGN (TREE_TYPE (smaller)),
1599 &overflow);
1600 if (! overflow)
1601 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1602 }
1603 else
1604 {
1605 wide_int alt = wi::sub (wi::to_wide (smaller), 1,
1606 TYPE_SIGN (TREE_TYPE (smaller)),
1607 &overflow);
1608 if (! overflow)
1609 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1610 }
1611 }
1612 }
1613 else
1614 return false;
1615
1616 /* Handle the special case of (signed_type)x < 0 being equivalent
1617 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1618 to x <= MAX_VAL(signed_type). */
1619 if ((cmp == GE_EXPR || cmp == LT_EXPR)
1620 && INTEGRAL_TYPE_P (type)
1621 && TYPE_UNSIGNED (type)
1622 && integer_zerop (rhs))
1623 {
1624 tree op = gimple_cond_lhs (cond);
1625 if (TREE_CODE (op) == SSA_NAME
1626 && INTEGRAL_TYPE_P (TREE_TYPE (op))
1627 && !TYPE_UNSIGNED (TREE_TYPE (op)))
1628 {
1629 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1630 if (gimple_assign_cast_p (def_stmt))
1631 {
1632 tree op1 = gimple_assign_rhs1 (def_stmt);
1633 if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
1634 && TYPE_UNSIGNED (TREE_TYPE (op1))
1635 && (TYPE_PRECISION (TREE_TYPE (op))
1636 == TYPE_PRECISION (TREE_TYPE (op1)))
1637 && useless_type_conversion_p (type, TREE_TYPE (op1)))
1638 {
1639 wide_int w1 = wi::max_value (TREE_TYPE (op));
1640 wide_int w2 = wi::add (w1, 1);
1641 if (cmp == LT_EXPR)
1642 {
1643 larger = op1;
1644 smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
1645 alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
1646 alt_larger = NULL_TREE;
1647 }
1648 else
1649 {
1650 smaller = op1;
1651 larger = wide_int_to_tree (TREE_TYPE (op1), w1);
1652 alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
1653 alt_smaller = NULL_TREE;
1654 }
1655 }
1656 }
1657 }
1658 }
1659
1660 /* We need to know which is the true edge and which is the false
1661 edge so that we know whether we have a min or a max. */
1662 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1663
1664 /* Forward the edges over the middle basic block. */
1665 if (true_edge->dest == middle_bb)
1666 true_edge = EDGE_SUCC (true_edge->dest, 0);
1667 if (false_edge->dest == middle_bb)
1668 false_edge = EDGE_SUCC (false_edge->dest, 0);
1669
1670 if (true_edge == e0)
1671 {
1672 gcc_assert (false_edge == e1);
1673 arg_true = arg0;
1674 arg_false = arg1;
1675 }
1676 else
1677 {
1678 gcc_assert (false_edge == e0);
1679 gcc_assert (true_edge == e1);
1680 arg_true = arg1;
1681 arg_false = arg0;
1682 }
1683
1684 if (empty_block_p (middle_bb))
1685 {
1686 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1687 || (alt_smaller
1688 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1689 && (operand_equal_for_phi_arg_p (arg_false, larger)
1690 || (alt_larger
1691 && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
1692 {
1693 /* Case
1694
1695 if (smaller < larger)
1696 rslt = smaller;
1697 else
1698 rslt = larger; */
1699 minmax = MIN_EXPR;
1700 }
1701 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1702 || (alt_smaller
1703 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1704 && (operand_equal_for_phi_arg_p (arg_true, larger)
1705 || (alt_larger
1706 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1707 minmax = MAX_EXPR;
1708 else
1709 return false;
1710 }
1711 else
1712 {
1713 /* Recognize the following case, assuming d <= u:
1714
1715 if (a <= u)
1716 b = MAX (a, d);
1717 x = PHI <b, u>
1718
1719 This is equivalent to
1720
1721 b = MAX (a, d);
1722 x = MIN (b, u); */
1723
1724 gimple *assign = last_and_only_stmt (middle_bb);
1725 tree lhs, op0, op1, bound;
1726
1727 if (!assign
1728 || gimple_code (assign) != GIMPLE_ASSIGN)
1729 return false;
1730
1731 lhs = gimple_assign_lhs (assign);
1732 ass_code = gimple_assign_rhs_code (assign);
1733 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1734 return false;
1735 op0 = gimple_assign_rhs1 (assign);
1736 op1 = gimple_assign_rhs2 (assign);
1737
1738 if (true_edge->src == middle_bb)
1739 {
1740 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1741 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1742 return false;
1743
1744 if (operand_equal_for_phi_arg_p (arg_false, larger)
1745 || (alt_larger
1746 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
1747 {
1748 /* Case
1749
1750 if (smaller < larger)
1751 {
1752 r' = MAX_EXPR (smaller, bound)
1753 }
1754 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1755 if (ass_code != MAX_EXPR)
1756 return false;
1757
1758 minmax = MIN_EXPR;
1759 if (operand_equal_for_phi_arg_p (op0, smaller)
1760 || (alt_smaller
1761 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1762 bound = op1;
1763 else if (operand_equal_for_phi_arg_p (op1, smaller)
1764 || (alt_smaller
1765 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1766 bound = op0;
1767 else
1768 return false;
1769
1770 /* We need BOUND <= LARGER. */
1771 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1772 bound, larger)))
1773 return false;
1774 }
1775 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1776 || (alt_smaller
1777 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1778 {
1779 /* Case
1780
1781 if (smaller < larger)
1782 {
1783 r' = MIN_EXPR (larger, bound)
1784 }
1785 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1786 if (ass_code != MIN_EXPR)
1787 return false;
1788
1789 minmax = MAX_EXPR;
1790 if (operand_equal_for_phi_arg_p (op0, larger)
1791 || (alt_larger
1792 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1793 bound = op1;
1794 else if (operand_equal_for_phi_arg_p (op1, larger)
1795 || (alt_larger
1796 && operand_equal_for_phi_arg_p (op1, alt_larger)))
1797 bound = op0;
1798 else
1799 return false;
1800
1801 /* We need BOUND >= SMALLER. */
1802 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1803 bound, smaller)))
1804 return false;
1805 }
1806 else
1807 return false;
1808 }
1809 else
1810 {
1811 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1812 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1813 return false;
1814
1815 if (operand_equal_for_phi_arg_p (arg_true, larger)
1816 || (alt_larger
1817 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
1818 {
1819 /* Case
1820
1821 if (smaller > larger)
1822 {
1823 r' = MIN_EXPR (smaller, bound)
1824 }
1825 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1826 if (ass_code != MIN_EXPR)
1827 return false;
1828
1829 minmax = MAX_EXPR;
1830 if (operand_equal_for_phi_arg_p (op0, smaller)
1831 || (alt_smaller
1832 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1833 bound = op1;
1834 else if (operand_equal_for_phi_arg_p (op1, smaller)
1835 || (alt_smaller
1836 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1837 bound = op0;
1838 else
1839 return false;
1840
1841 /* We need BOUND >= LARGER. */
1842 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1843 bound, larger)))
1844 return false;
1845 }
1846 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
1847 || (alt_smaller
1848 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1849 {
1850 /* Case
1851
1852 if (smaller > larger)
1853 {
1854 r' = MAX_EXPR (larger, bound)
1855 }
1856 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1857 if (ass_code != MAX_EXPR)
1858 return false;
1859
1860 minmax = MIN_EXPR;
1861 if (operand_equal_for_phi_arg_p (op0, larger)
|| (alt_larger
&& operand_equal_for_phi_arg_p (op0, alt_larger)))
1862 bound = op1;
1863 else if (operand_equal_for_phi_arg_p (op1, larger)
|| (alt_larger
&& operand_equal_for_phi_arg_p (op1, alt_larger)))
1864 bound = op0;
1865 else
1866 return false;
1867
1868 /* We need BOUND <= SMALLER. */
1869 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1870 bound, smaller)))
1871 return false;
1872 }
1873 else
1874 return false;
1875 }
1876
1877 /* Move the statement from the middle block. */
1878 gsi = gsi_last_bb (cond_bb);
1879 gsi_from = gsi_last_nondebug_bb (middle_bb);
1880 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
1881 SSA_OP_DEF));
1882 gsi_move_before (&gsi_from, &gsi);
1883 }
1884
1885 /* Emit the statement to compute min/max. */
1886 gimple_seq stmts = NULL;
1887 tree phi_result = PHI_RESULT (phi);
1888 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
1889
1890 gsi = gsi_last_bb (cond_bb);
1891 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
1892
1893 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1894
1895 return true;
1896 }
1897
1898 /* Return true if the only executable statement in BB is a GIMPLE_COND. */
1899
1900 static bool
1901 cond_only_block_p (basic_block bb)
1902 {
1903 /* BB must have no executable statements. */
1904 gimple_stmt_iterator gsi = gsi_after_labels (bb);
1905 if (phi_nodes (bb))
1906 return false;
1907 while (!gsi_end_p (gsi))
1908 {
1909 gimple *stmt = gsi_stmt (gsi);
1910 if (is_gimple_debug (stmt))
1911 ;
1912 else if (gimple_code (stmt) == GIMPLE_NOP
1913 || gimple_code (stmt) == GIMPLE_PREDICT
1914 || gimple_code (stmt) == GIMPLE_COND)
1915 ;
1916 else
1917 return false;
1918 gsi_next (&gsi);
1919 }
1920 return true;
1921 }
1922
1923 /* Attempt to optimize (x <=> y) cmp 0 and similar comparisons.
1924 For strong ordering <=> try to match something like:
1925 <bb 2> : // cond3_bb (== cond2_bb)
1926 if (x_4(D) != y_5(D))
1927 goto <bb 3>; [INV]
1928 else
1929 goto <bb 6>; [INV]
1930
1931 <bb 3> : // cond_bb
1932 if (x_4(D) < y_5(D))
1933 goto <bb 6>; [INV]
1934 else
1935 goto <bb 4>; [INV]
1936
1937 <bb 4> : // middle_bb
1938
1939 <bb 6> : // phi_bb
1940 # iftmp.0_2 = PHI <1(4), 0(2), -1(3)>
1941 _1 = iftmp.0_2 == 0;
1942
1943 and for partial ordering <=> something like:
1944
1945 <bb 2> : // cond3_bb
1946 if (a_3(D) == b_5(D))
1947 goto <bb 6>; [50.00%]
1948 else
1949 goto <bb 3>; [50.00%]
1950
1951 <bb 3> [local count: 536870913]: // cond2_bb
1952 if (a_3(D) < b_5(D))
1953 goto <bb 6>; [50.00%]
1954 else
1955 goto <bb 4>; [50.00%]
1956
1957 <bb 4> [local count: 268435456]: // cond_bb
1958 if (a_3(D) > b_5(D))
1959 goto <bb 6>; [50.00%]
1960 else
1961 goto <bb 5>; [50.00%]
1962
1963 <bb 5> [local count: 134217728]: // middle_bb
1964
1965 <bb 6> [local count: 1073741824]: // phi_bb
1966 # SR.27_4 = PHI <0(2), -1(3), 1(4), 2(5)>
1967 _2 = SR.27_4 > 0; */
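
/* A C++-level sketch that produces the strong ordering shape above
   (illustrative only; the exact GIMPLE depends on how <compare> is
   inlined):

     #include <compare>
     bool le (int x, int y) { return (x <=> y) <= 0; }

   The PHI only materializes the -1/0/1 result in order to compare it
   against a constant, so the whole diamond can be rewritten as the
   direct comparison x <= y. */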
1968
1969 static bool
1970 spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
1971 edge e0, edge e1, gphi *phi,
1972 tree arg0, tree arg1)
1973 {
1974 tree phires = PHI_RESULT (phi);
1975 if (!INTEGRAL_TYPE_P (TREE_TYPE (phires))
1976 || TYPE_UNSIGNED (TREE_TYPE (phires))
1977 || !tree_fits_shwi_p (arg0)
1978 || !tree_fits_shwi_p (arg1)
1979 || !IN_RANGE (tree_to_shwi (arg0), -1, 2)
1980 || !IN_RANGE (tree_to_shwi (arg1), -1, 2))
1981 return false;
1982
1983 basic_block phi_bb = gimple_bb (phi);
1984 gcc_assert (phi_bb == e0->dest && phi_bb == e1->dest);
1985 if (!IN_RANGE (EDGE_COUNT (phi_bb->preds), 3, 4))
1986 return false;
1987
1988 use_operand_p use_p;
1989 gimple *use_stmt;
1990 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phires))
1991 return false;
1992 if (!single_imm_use (phires, &use_p, &use_stmt))
1993 return false;
1994 enum tree_code cmp;
1995 tree lhs, rhs;
1996 gimple *orig_use_stmt = use_stmt;
1997 tree orig_use_lhs = NULL_TREE;
1998 int prec = TYPE_PRECISION (TREE_TYPE (phires));
1999 if (is_gimple_assign (use_stmt)
2000 && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
2001 && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
2002 && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
2003 == wi::shifted_mask (1, prec - 1, false, prec)))
2004 {
2005 /* For partial_ordering result operator>= with unspec as second
2006 argument is (res & 1) == res, folded by match.pd into
2007 (res & ~1) == 0. */
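/* Worked instance of the mask test above: for prec == 32,
   wi::shifted_mask (1, prec - 1, false, prec) is 0xfffffffe, i.e. ~1,
   so the test accepts exactly the ~1 mask at the type's precision. */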
2008 orig_use_lhs = gimple_assign_lhs (use_stmt);
2009 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2010 return false;
2011 if (EDGE_COUNT (phi_bb->preds) != 4)
2012 return false;
2013 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2014 return false;
2015 }
2016 if (gimple_code (use_stmt) == GIMPLE_COND)
2017 {
2018 cmp = gimple_cond_code (use_stmt);
2019 lhs = gimple_cond_lhs (use_stmt);
2020 rhs = gimple_cond_rhs (use_stmt);
2021 }
2022 else if (is_gimple_assign (use_stmt))
2023 {
2024 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2025 {
2026 cmp = gimple_assign_rhs_code (use_stmt);
2027 lhs = gimple_assign_rhs1 (use_stmt);
2028 rhs = gimple_assign_rhs2 (use_stmt);
2029 }
2030 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
2031 {
2032 tree cond = gimple_assign_rhs1 (use_stmt);
2033 if (!COMPARISON_CLASS_P (cond))
2034 return false;
2035 cmp = TREE_CODE (cond);
2036 lhs = TREE_OPERAND (cond, 0);
2037 rhs = TREE_OPERAND (cond, 1);
2038 }
2039 else
2040 return false;
2041 }
2042 else
2043 return false;
2044 switch (cmp)
2045 {
2046 case EQ_EXPR:
2047 case NE_EXPR:
2048 case LT_EXPR:
2049 case GT_EXPR:
2050 case LE_EXPR:
2051 case GE_EXPR:
2052 break;
2053 default:
2054 return false;
2055 }
2056 if (lhs != (orig_use_lhs ? orig_use_lhs : phires)
2057 || !tree_fits_shwi_p (rhs)
2058 || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
2059 return false;
2060 if (orig_use_lhs)
2061 {
2062 if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
2063 return false;
2064 /* As with -ffast-math we assume a return value of 2 to be
2065 impossible, canonicalize (res & ~1) == 0 into
2066 res >= 0 and (res & ~1) != 0 into res < 0. */
2067 cmp = cmp == EQ_EXPR ? GE_EXPR : LT_EXPR;
2068 }
2069
2070 if (!empty_block_p (middle_bb))
2071 return false;
2072
2073 gcond *cond1 = as_a <gcond *> (last_stmt (cond_bb));
2074 enum tree_code cmp1 = gimple_cond_code (cond1);
2075 switch (cmp1)
2076 {
2077 case LT_EXPR:
2078 case LE_EXPR:
2079 case GT_EXPR:
2080 case GE_EXPR:
2081 break;
2082 default:
2083 return false;
2084 }
2085 tree lhs1 = gimple_cond_lhs (cond1);
2086 tree rhs1 = gimple_cond_rhs (cond1);
2087 /* The optimization may be unsafe due to NaNs. */
2088 if (HONOR_NANS (TREE_TYPE (lhs1)))
2089 return false;
2090 if (TREE_CODE (lhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs1))
2091 return false;
2092 if (TREE_CODE (rhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
2093 return false;
2094
2095 if (!single_pred_p (cond_bb) || !cond_only_block_p (cond_bb))
2096 return false;
2097
2098 basic_block cond2_bb = single_pred (cond_bb);
2099 if (EDGE_COUNT (cond2_bb->succs) != 2)
2100 return false;
2101 edge cond2_phi_edge;
2102 if (EDGE_SUCC (cond2_bb, 0)->dest == cond_bb)
2103 {
2104 if (EDGE_SUCC (cond2_bb, 1)->dest != phi_bb)
2105 return false;
2106 cond2_phi_edge = EDGE_SUCC (cond2_bb, 1);
2107 }
2108 else if (EDGE_SUCC (cond2_bb, 0)->dest != phi_bb)
2109 return false;
2110 else
2111 cond2_phi_edge = EDGE_SUCC (cond2_bb, 0);
2112 tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
2113 if (!tree_fits_shwi_p (arg2))
2114 return false;
2115 gimple *cond2 = last_stmt (cond2_bb);
2116 if (cond2 == NULL || gimple_code (cond2) != GIMPLE_COND)
2117 return false;
2118 enum tree_code cmp2 = gimple_cond_code (cond2);
2119 tree lhs2 = gimple_cond_lhs (cond2);
2120 tree rhs2 = gimple_cond_rhs (cond2);
2121 if (lhs2 == lhs1)
2122 {
2123 if (!operand_equal_p (rhs2, rhs1, 0))
2124 {
2125 if ((cmp2 == EQ_EXPR || cmp2 == NE_EXPR)
2126 && TREE_CODE (rhs1) == INTEGER_CST
2127 && TREE_CODE (rhs2) == INTEGER_CST)
2128 {
2129 /* For integers, we can have cond2 x == 5
2130 and cond1 x < 5, x <= 4, x <= 5, x < 6,
2131 x > 5, x >= 6, x >= 5 or x > 4. */
2132 if (tree_int_cst_lt (rhs1, rhs2))
2133 {
2134 if (wi::ne_p (wi::to_wide (rhs1) + 1, wi::to_wide (rhs2)))
2135 return false;
2136 if (cmp1 == LE_EXPR)
2137 cmp1 = LT_EXPR;
2138 else if (cmp1 == GT_EXPR)
2139 cmp1 = GE_EXPR;
2140 else
2141 return false;
2142 }
2143 else
2144 {
2145 gcc_checking_assert (tree_int_cst_lt (rhs2, rhs1));
2146 if (wi::ne_p (wi::to_wide (rhs2) + 1, wi::to_wide (rhs1)))
2147 return false;
2148 if (cmp1 == LT_EXPR)
2149 cmp1 = LE_EXPR;
2150 else if (cmp1 == GE_EXPR)
2151 cmp1 = GT_EXPR;
2152 else
2153 return false;
2154 }
2155 rhs1 = rhs2;
2156 }
2157 else
2158 return false;
2159 }
2160 }
2161 else if (lhs2 == rhs1)
2162 {
2163 if (rhs2 != lhs1)
2164 return false;
2165 }
2166 else
2167 return false;
2168
2169 tree arg3 = arg2;
2170 basic_block cond3_bb = cond2_bb;
2171 edge cond3_phi_edge = cond2_phi_edge;
2172 gimple *cond3 = cond2;
2173 enum tree_code cmp3 = cmp2;
2174 tree lhs3 = lhs2;
2175 tree rhs3 = rhs2;
2176 if (EDGE_COUNT (phi_bb->preds) == 4)
2177 {
2178 if (absu_hwi (tree_to_shwi (arg2)) != 1)
2179 return false;
2180 if (e1->flags & EDGE_TRUE_VALUE)
2181 {
2182 if (tree_to_shwi (arg0) != 2
2183 || absu_hwi (tree_to_shwi (arg1)) != 1
2184 || wi::to_widest (arg1) == wi::to_widest (arg2))
2185 return false;
2186 }
2187 else if (tree_to_shwi (arg1) != 2
2188 || absu_hwi (tree_to_shwi (arg0)) != 1
2189 || wi::to_widest (arg0) == wi::to_widest (arg2))
2190 return false;
2191 switch (cmp2)
2192 {
2193 case LT_EXPR:
2194 case LE_EXPR:
2195 case GT_EXPR:
2196 case GE_EXPR:
2197 break;
2198 default:
2199 return false;
2200 }
2201 /* if (x < y) goto phi_bb; else fallthru;
2202 if (x > y) goto phi_bb; else fallthru;
2203 bbx:;
2204 phi_bb:;
2205 is ok, but if x and y are swapped in one of the comparisons,
2206 or the comparisons are the same and operands not swapped,
2207 or the true and false edges are swapped, it is not. */
2208 if ((lhs2 == lhs1)
2209 ^ (((cond2_phi_edge->flags
2210 & ((cmp2 == LT_EXPR || cmp2 == LE_EXPR)
2211 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)
2212 != ((e1->flags
2213 & ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2214 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)))
2215 return false;
2216 if (!single_pred_p (cond2_bb) || !cond_only_block_p (cond2_bb))
2217 return false;
2218 cond3_bb = single_pred (cond2_bb);
2219 if (EDGE_COUNT (cond3_bb->succs) != 2)
2220 return false;
2221 if (EDGE_SUCC (cond3_bb, 0)->dest == cond2_bb)
2222 {
2223 if (EDGE_SUCC (cond3_bb, 1)->dest != phi_bb)
2224 return false;
2225 cond3_phi_edge = EDGE_SUCC (cond3_bb, 1);
2226 }
2227 else if (EDGE_SUCC (cond3_bb, 0)->dest != phi_bb)
2228 return false;
2229 else
2230 cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
2231 arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
2232 cond3 = last_stmt (cond3_bb);
2233 if (cond3 == NULL || gimple_code (cond3) != GIMPLE_COND)
2234 return false;
2235 cmp3 = gimple_cond_code (cond3);
2236 lhs3 = gimple_cond_lhs (cond3);
2237 rhs3 = gimple_cond_rhs (cond3);
2238 if (lhs3 == lhs1)
2239 {
2240 if (!operand_equal_p (rhs3, rhs1, 0))
2241 return false;
2242 }
2243 else if (lhs3 == rhs1)
2244 {
2245 if (rhs3 != lhs1)
2246 return false;
2247 }
2248 else
2249 return false;
2250 }
2251 else if (absu_hwi (tree_to_shwi (arg0)) != 1
2252 || absu_hwi (tree_to_shwi (arg1)) != 1
2253 || wi::to_widest (arg0) == wi::to_widest (arg1))
2254 return false;
2255
2256 if (!integer_zerop (arg3) || (cmp3 != EQ_EXPR && cmp3 != NE_EXPR))
2257 return false;
2258 if ((cond3_phi_edge->flags & (cmp3 == EQ_EXPR
2259 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) == 0)
2260 return false;
2261
2262 /* lhs1 one_cmp rhs1 results in phires of 1. */
2263 enum tree_code one_cmp;
2264 if ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2265 ^ (!integer_onep ((e1->flags & EDGE_TRUE_VALUE) ? arg1 : arg0)))
2266 one_cmp = LT_EXPR;
2267 else
2268 one_cmp = GT_EXPR;
2269
2270 enum tree_code res_cmp;
2271 switch (cmp)
2272 {
2273 case EQ_EXPR:
2274 if (integer_zerop (rhs))
2275 res_cmp = EQ_EXPR;
2276 else if (integer_minus_onep (rhs))
2277 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2278 else if (integer_onep (rhs))
2279 res_cmp = one_cmp;
2280 else
2281 return false;
2282 break;
2283 case NE_EXPR:
2284 if (integer_zerop (rhs))
2285 res_cmp = NE_EXPR;
2286 else if (integer_minus_onep (rhs))
2287 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2288 else if (integer_onep (rhs))
2289 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2290 else
2291 return false;
2292 break;
2293 case LT_EXPR:
2294 if (integer_onep (rhs))
2295 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2296 else if (integer_zerop (rhs))
2297 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2298 else
2299 return false;
2300 break;
2301 case LE_EXPR:
2302 if (integer_zerop (rhs))
2303 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2304 else if (integer_minus_onep (rhs))
2305 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2306 else
2307 return false;
2308 break;
2309 case GT_EXPR:
2310 if (integer_minus_onep (rhs))
2311 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2312 else if (integer_zerop (rhs))
2313 res_cmp = one_cmp;
2314 else
2315 return false;
2316 break;
2317 case GE_EXPR:
2318 if (integer_zerop (rhs))
2319 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2320 else if (integer_onep (rhs))
2321 res_cmp = one_cmp;
2322 else
2323 return false;
2324 break;
2325 default:
2326 gcc_unreachable ();
2327 }
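/* Worked example of the mapping above: if lhs1 < rhs1 yields phires 1
   (one_cmp == LT_EXPR), then phires == -1 means lhs1 > rhs1, so for
   cmp == GE_EXPR against 0 (phires >= 0, i.e. phires != -1) we get
   res_cmp LE_EXPR, i.e. lhs1 <= rhs1. */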
2328
2329 if (gimple_code (use_stmt) == GIMPLE_COND)
2330 {
2331 gcond *use_cond = as_a <gcond *> (use_stmt);
2332 gimple_cond_set_code (use_cond, res_cmp);
2333 gimple_cond_set_lhs (use_cond, lhs1);
2334 gimple_cond_set_rhs (use_cond, rhs1);
2335 }
2336 else if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2337 {
2338 gimple_assign_set_rhs_code (use_stmt, res_cmp);
2339 gimple_assign_set_rhs1 (use_stmt, lhs1);
2340 gimple_assign_set_rhs2 (use_stmt, rhs1);
2341 }
2342 else
2343 {
2344 tree cond = build2 (res_cmp, TREE_TYPE (gimple_assign_rhs1 (use_stmt)),
2345 lhs1, rhs1);
2346 gimple_assign_set_rhs1 (use_stmt, cond);
2347 }
2348 update_stmt (use_stmt);
2349
2350 if (MAY_HAVE_DEBUG_BIND_STMTS)
2351 {
2352 use_operand_p use_p;
2353 imm_use_iterator iter;
2354 bool has_debug_uses = false;
2355 FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
2356 {
2357 gimple *use_stmt = USE_STMT (use_p);
2358 if (orig_use_lhs && use_stmt == orig_use_stmt)
2359 continue;
2360 gcc_assert (is_gimple_debug (use_stmt));
2361 has_debug_uses = true;
2362 break;
2363 }
2364 if (orig_use_lhs)
2365 {
2366 if (!has_debug_uses)
2367 FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
2368 {
2369 gimple *use_stmt = USE_STMT (use_p);
2370 gcc_assert (is_gimple_debug (use_stmt));
2371 has_debug_uses = true;
2372 }
2373 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2374 tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
2375 gimple_assign_set_rhs_with_ops (&gsi, INTEGER_CST, zero);
2376 update_stmt (orig_use_stmt);
2377 }
2378
2379 if (has_debug_uses)
2380 {
2381 /* If there are debug uses, emit something like:
2382 # DEBUG D#1 => i_2(D) > j_3(D) ? 1 : -1
2383 # DEBUG D#2 => i_2(D) == j_3(D) ? 0 : D#1
2384 where > stands for the comparison that yielded 1
2385 and replace debug uses of phi result with that D#2.
2386 Ignore the value of 2, because if NaNs aren't expected,
2387 all floating point numbers should be comparable. */
2388 gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (phi));
2389 tree type = TREE_TYPE (phires);
2390 tree temp1 = make_node (DEBUG_EXPR_DECL);
2391 DECL_ARTIFICIAL (temp1) = 1;
2392 TREE_TYPE (temp1) = type;
2393 SET_DECL_MODE (temp1, TYPE_MODE (type));
2394 tree t = build2 (one_cmp, boolean_type_node, lhs1, rhs2);
2395 t = build3 (COND_EXPR, type, t, build_one_cst (type),
2396 build_int_cst (type, -1));
2397 gimple *g = gimple_build_debug_bind (temp1, t, phi);
2398 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2399 tree temp2 = make_node (DEBUG_EXPR_DECL);
2400 DECL_ARTIFICIAL (temp2) = 1;
2401 TREE_TYPE (temp2) = type;
2402 SET_DECL_MODE (temp2, TYPE_MODE (type));
2403 t = build2 (EQ_EXPR, boolean_type_node, lhs1, rhs2);
2404 t = build3 (COND_EXPR, type, t, build_zero_cst (type), temp1);
2405 g = gimple_build_debug_bind (temp2, t, phi);
2406 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2407 replace_uses_by (phires, temp2);
2408 if (orig_use_lhs)
2409 replace_uses_by (orig_use_lhs, temp2);
2410 }
2411 }
2412
2413 if (orig_use_lhs)
2414 {
2415 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2416 gsi_remove (&gsi, true);
2417 }
2418
2419 gimple_stmt_iterator psi = gsi_for_stmt (phi);
2420 remove_phi_node (&psi, true);
2421 statistics_counter_event (cfun, "spaceship replacement", 1);
2422
2423 return true;
2424 }
2425
2426 /* Convert
2427
2428 <bb 2>
2429 if (b_4(D) != 0)
2430 goto <bb 3>
2431 else
2432 goto <bb 4>
2433
2434 <bb 3>
2435 _2 = (unsigned long) b_4(D);
2436 _9 = __builtin_popcountl (_2);
2437 OR
2438 _9 = __builtin_popcountl (b_4(D));
2439
2440 <bb 4>
2441 c_12 = PHI <0(2), _9(3)>
2442
2443 Into
2444 <bb 2>
2445 _2 = (unsigned long) b_4(D);
2446 _9 = __builtin_popcountl (_2);
2447 OR
2448 _9 = __builtin_popcountl (b_4(D));
2449
2450 <bb 4>
2451 c_12 = PHI <_9(2)>
2452
2453 Similarly for __builtin_clz or __builtin_ctz if
2454 C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
2455 instead of 0 above it uses the value from that macro. */
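
/* A source-level sketch of the popcount case (illustrative only):

     int f (unsigned long b) { return b != 0 ? __builtin_popcountl (b) : 0; }

   The branch can go away because popcount of 0 is 0 anyway; for
   clz/ctz this is only valid when the target defines the value at
   zero and the PHI constant matches it, as verified below. */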
2456
2457 static bool
2458 cond_removal_in_popcount_clz_ctz_pattern (basic_block cond_bb,
2459 basic_block middle_bb,
2460 edge e1, edge e2, gphi *phi,
2461 tree arg0, tree arg1)
2462 {
2463 gimple *cond;
2464 gimple_stmt_iterator gsi, gsi_from;
2465 gimple *call;
2466 gimple *cast = NULL;
2467 tree lhs, arg;
2468
2469 /* Check that
2470 _2 = (unsigned long) b_4(D);
2471 _9 = __builtin_popcountl (_2);
2472 OR
2473 _9 = __builtin_popcountl (b_4(D));
2474 are the only stmts in the middle_bb. */
2475
2476 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
2477 if (gsi_end_p (gsi))
2478 return false;
2479 cast = gsi_stmt (gsi);
2480 gsi_next_nondebug (&gsi);
2481 if (!gsi_end_p (gsi))
2482 {
2483 call = gsi_stmt (gsi);
2484 gsi_next_nondebug (&gsi);
2485 if (!gsi_end_p (gsi))
2486 return false;
2487 }
2488 else
2489 {
2490 call = cast;
2491 cast = NULL;
2492 }
2493
2494 /* Check that we have a popcount/clz/ctz builtin. */
2495 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
2496 return false;
2497
2498 arg = gimple_call_arg (call, 0);
2499 lhs = gimple_get_lhs (call);
2500
2501 if (lhs == NULL_TREE)
2502 return false;
2503
2504 combined_fn cfn = gimple_call_combined_fn (call);
2505 internal_fn ifn = IFN_LAST;
2506 int val = 0;
2507 switch (cfn)
2508 {
2509 CASE_CFN_POPCOUNT:
2510 break;
2511 CASE_CFN_CLZ:
2512 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2513 {
2514 tree type = TREE_TYPE (arg);
2515 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
2516 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2517 val) == 2)
2518 {
2519 ifn = IFN_CLZ;
2520 break;
2521 }
2522 }
2523 return false;
2524 CASE_CFN_CTZ:
2525 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2526 {
2527 tree type = TREE_TYPE (arg);
2528 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
2529 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2530 val) == 2)
2531 {
2532 ifn = IFN_CTZ;
2533 break;
2534 }
2535 }
2536 return false;
2537 default:
2538 return false;
2539 }
2540
2541 if (cast)
2542 {
2543 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
2544 /* Check that we have a cast prior to that. */
2545 if (gimple_code (cast) != GIMPLE_ASSIGN
2546 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
2547 return false;
2548 /* Result of the cast stmt is the argument to the builtin. */
2549 if (arg != gimple_assign_lhs (cast))
2550 return false;
2551 arg = gimple_assign_rhs1 (cast);
2552 }
2553
2554 cond = last_stmt (cond_bb);
2555
2556 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
2557 builtin. */
2558 if (gimple_code (cond) != GIMPLE_COND
2559 || (gimple_cond_code (cond) != NE_EXPR
2560 && gimple_cond_code (cond) != EQ_EXPR)
2561 || !integer_zerop (gimple_cond_rhs (cond))
2562 || arg != gimple_cond_lhs (cond))
2563 return false;
2564
2565 /* Canonicalize. */
2566 if ((e2->flags & EDGE_TRUE_VALUE
2567 && gimple_cond_code (cond) == NE_EXPR)
2568 || (e1->flags & EDGE_TRUE_VALUE
2569 && gimple_cond_code (cond) == EQ_EXPR))
2570 {
2571 std::swap (arg0, arg1);
2572 std::swap (e1, e2);
2573 }
2574
2575 /* Check PHI arguments. */
2576 if (lhs != arg0
2577 || TREE_CODE (arg1) != INTEGER_CST
2578 || wi::to_wide (arg1) != val)
2579 return false;
2580
2581 /* And insert the popcount/clz/ctz builtin and cast stmt before the
2582 cond_bb. */
2583 gsi = gsi_last_bb (cond_bb);
2584 if (cast)
2585 {
2586 gsi_from = gsi_for_stmt (cast);
2587 gsi_move_before (&gsi_from, &gsi);
2588 reset_flow_sensitive_info (gimple_get_lhs (cast));
2589 }
2590 gsi_from = gsi_for_stmt (call);
2591 if (ifn == IFN_LAST || gimple_call_internal_p (call))
2592 gsi_move_before (&gsi_from, &gsi);
2593 else
2594 {
2595 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
2596 the latter is well defined at zero. */
2597 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
2598 gimple_call_set_lhs (call, lhs);
2599 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2600 gsi_remove (&gsi_from, true);
2601 }
2602 reset_flow_sensitive_info (lhs);
2603
2604 /* Now update the PHI and remove unneeded bbs. */
2605 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
2606 return true;
2607 }
2608
2609 /* The function abs_replacement does the main work of doing the absolute
2610 replacement. Return true if the replacement is done. Otherwise return
2611 false.
2612 bb is the basic block where the replacement is going to be done. arg0
2613 is argument 0 from the phi. Likewise for arg1. */
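
/* For example (a sketch):

     x = a < 0 ? -a : a; // becomes x = ABS_EXPR <a>
     x = a > 0 ? -a : a; // becomes x = -ABS_EXPR <a>

   The second form needs an extra negation of the result, which is
   only done when it cannot introduce signed-overflow UB. */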
2614
2615 static bool
2616 abs_replacement (basic_block cond_bb, basic_block middle_bb,
2617 edge e0 ATTRIBUTE_UNUSED, edge e1,
2618 gphi *phi, tree arg0, tree arg1)
2619 {
2620 tree result;
2621 gassign *new_stmt;
2622 gimple *cond;
2623 gimple_stmt_iterator gsi;
2624 edge true_edge, false_edge;
2625 gimple *assign;
2626 edge e;
2627 tree rhs, lhs;
2628 bool negate;
2629 enum tree_code cond_code;
2630
2631 /* If the type says honor signed zeros we cannot do this
2632 optimization. */
2633 if (HONOR_SIGNED_ZEROS (arg1))
2634 return false;
2635
2636 /* OTHER_BLOCK must have only one executable statement which must have the
2637 form arg0 = -arg1 or arg1 = -arg0. */
2638
2639 assign = last_and_only_stmt (middle_bb);
2640 /* If we did not find the proper negation assignment, then we cannot
2641 optimize. */
2642 if (assign == NULL)
2643 return false;
2644
2645 /* If we got here, then we have found the only executable statement
2646 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
2647 arg1 = -arg0, then we cannot optimize. */
2648 if (gimple_code (assign) != GIMPLE_ASSIGN)
2649 return false;
2650
2651 lhs = gimple_assign_lhs (assign);
2652
2653 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
2654 return false;
2655
2656 rhs = gimple_assign_rhs1 (assign);
2657
2658 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
2659 if (!(lhs == arg0 && rhs == arg1)
2660 && !(lhs == arg1 && rhs == arg0))
2661 return false;
2662
2663 cond = last_stmt (cond_bb);
2664 result = PHI_RESULT (phi);
2665
2666 /* Only relationals comparing arg[01] against zero are interesting. */
2667 cond_code = gimple_cond_code (cond);
2668 if (cond_code != GT_EXPR && cond_code != GE_EXPR
2669 && cond_code != LT_EXPR && cond_code != LE_EXPR)
2670 return false;
2671
2672 /* Make sure the conditional is arg[01] OP y. */
2673 if (gimple_cond_lhs (cond) != rhs)
2674 return false;
2675
2676 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
2677 ? real_zerop (gimple_cond_rhs (cond))
2678 : integer_zerop (gimple_cond_rhs (cond)))
2679 ;
2680 else
2681 return false;
2682
2683 /* We need to know which is the true edge and which is the false
2684 edge so that we know if have abs or negative abs. */
2685 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
2686
2687 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
2688 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
2689 the false edge goes to OTHER_BLOCK. */
2690 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
2691 e = true_edge;
2692 else
2693 e = false_edge;
2694
2695 if (e->dest == middle_bb)
2696 negate = true;
2697 else
2698 negate = false;
2699
2700 /* If the code negates only iff positive then make sure to not
2701 introduce undefined behavior when negating or computing the absolute.
2702 ??? We could use range info if present to check for arg1 == INT_MIN. */
2703 if (negate
2704 && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
2705 && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
2706 return false;
2707
2708 result = duplicate_ssa_name (result, NULL);
2709
2710 if (negate)
2711 lhs = make_ssa_name (TREE_TYPE (result));
2712 else
2713 lhs = result;
2714
2715 /* Build the modify expression with abs expression. */
2716 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
2717
2718 gsi = gsi_last_bb (cond_bb);
2719 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2720
2721 if (negate)
2722 {
2723 /* Get the right GSI. We want to insert after the recently
2724 added ABS_EXPR statement (which we know is the first statement
2725 in the block). */
2726 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
2727
2728 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2729 }
2730
2731 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2732
2733 /* Note that we optimized this PHI. */
2734 return true;
2735 }
2736
2737 /* Auxiliary functions to determine the set of memory accesses which
2738 can't trap because they are preceded by accesses to the same memory
2739 portion. We do that for MEM_REFs, so we only need to track
2740 the SSA_NAME of the pointer indirectly referenced. The algorithm
2741 simply is a walk over all instructions in dominator order. When
2742 we see a MEM_REF we determine if we've already seen the same
2743 ref anywhere up to the root of the dominator tree. If we have, the
2744 current access can't trap. If we don't see any dominating access
2745 the current access might trap, but might also make later accesses
2746 non-trapping, so we remember it. We need to be careful with loads
2747 or stores, for instance a load might not trap, while a store would,
2748 so if we see a dominating read access this doesn't mean that a later
2749 write access would not trap. Hence we also need to differentiate the
2750 type of access(es) seen.
2751
2752 ??? We currently are very conservative and assume that a load might
2753 trap even if a store doesn't (write-only memory). This probably is
2754 overly conservative.
2755
2756 We currently support a special case: for !TREE_ADDRESSABLE automatic
2757 variables, we can ignore whether something is a load or store, because
2758 the local stack is always writable. */
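
/* For instance (a sketch; p is a hypothetical pointer):

     *p = 0; // makes *p known non-trapping below
     if (cond)
       *p = v; // dominated by the access above, so it cannot trap

   which is what later allows cond_store_replacement to sink the
   conditional store. */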
2759
2760 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
2761 basic block an *_REF through it was seen, which would constitute a
2762 no-trap region for same accesses.
2763
2764 Size is needed to support 2 MEM_REFs of different types, like
2765 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2766 OEP_ADDRESS_OF. */
2767 struct ref_to_bb
2768 {
2769 tree exp;
2770 HOST_WIDE_INT size;
2771 unsigned int phase;
2772 basic_block bb;
2773 };
2774
2775 /* Hashtable helpers. */
2776
2777 struct refs_hasher : free_ptr_hash<ref_to_bb>
2778 {
2779 static inline hashval_t hash (const ref_to_bb *);
2780 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2781 };
2782
2783 /* Used for quick clearing of the hash-table when we see calls.
2784 Hash entries with phase < nt_call_phase are invalid. */
2785 static unsigned int nt_call_phase;
2786
2787 /* The hash function. */
2788
2789 inline hashval_t
2790 refs_hasher::hash (const ref_to_bb *n)
2791 {
2792 inchash::hash hstate;
2793 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2794 hstate.add_hwi (n->size);
2795 return hstate.end ();
2796 }
2797
2798 /* The equality function of *P1 and *P2. */
2799
2800 inline bool
2801 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2802 {
2803 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2804 && n1->size == n2->size;
2805 }
2806
2807 class nontrapping_dom_walker : public dom_walker
2808 {
2809 public:
2810 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2811 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2812 {}
2813
2814 virtual edge before_dom_children (basic_block);
2815 virtual void after_dom_children (basic_block);
2816
2817 private:
2818
2819 /* We see the expression EXP in basic block BB. If it's an interesting
2820 expression (an MEM_REF through an SSA_NAME) possibly insert the
2821 expression into the set NONTRAP or the hash table of seen expressions.
2822 STORE is true if this expression is on the LHS, otherwise it's on
2823 the RHS. */
2824 void add_or_mark_expr (basic_block, tree, bool);
2825
2826 hash_set<tree> *m_nontrapping;
2827
2828 /* The hash table for remembering what we've seen. */
2829 hash_table<refs_hasher> m_seen_refs;
2830 };
2831
2832 /* Called by walk_dominator_tree, when entering the block BB. */
2833 edge
2834 nontrapping_dom_walker::before_dom_children (basic_block bb)
2835 {
2836 edge e;
2837 edge_iterator ei;
2838 gimple_stmt_iterator gsi;
2839
2840 /* If we haven't seen all our predecessors, clear the hash-table. */
2841 FOR_EACH_EDGE (e, ei, bb->preds)
2842 if ((((size_t)e->src->aux) & 2) == 0)
2843 {
2844 nt_call_phase++;
2845 break;
2846 }
2847
2848 /* Mark this BB as being on the path to dominator root and as visited. */
2849 bb->aux = (void*)(1 | 2);
2850
2851 /* And walk the statements in order. */
2852 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2853 {
2854 gimple *stmt = gsi_stmt (gsi);
2855
2856 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2857 || (is_gimple_call (stmt)
2858 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2859 nt_call_phase++;
2860 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2861 {
2862 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2863 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2864 }
2865 }
2866 return NULL;
2867 }
2868
2869 /* Called by walk_dominator_tree, when basic block BB is exited. */
2870 void
2871 nontrapping_dom_walker::after_dom_children (basic_block bb)
2872 {
2873 /* This BB isn't on the path to dominator root anymore. */
2874 bb->aux = (void*)2;
2875 }
2876
2877 /* We see the expression EXP in basic block BB. If it's an interesting
2878 expression of:
2879 1) MEM_REF
2880 2) ARRAY_REF
2881 3) COMPONENT_REF
2882 possibly insert the expression into the set NONTRAP or the hash table
2883 of seen expressions. STORE is true if this expression is on the LHS,
2884 otherwise it's on the RHS. */
2885 void
2886 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2887 {
2888 HOST_WIDE_INT size;
2889
2890 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2891 || TREE_CODE (exp) == COMPONENT_REF)
2892 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2893 {
2894 struct ref_to_bb map;
2895 ref_to_bb **slot;
2896 struct ref_to_bb *r2bb;
2897 basic_block found_bb = 0;
2898
2899 if (!store)
2900 {
2901 tree base = get_base_address (exp);
2902 /* Only record a LOAD of a local variable whose address is not taken,
2903 as the local stack is always writable. This allows cselim on a STORE
2904 with a dominating LOAD. */
2905 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2906 return;
2907 }
2908
2909 /* Try to find the last seen *_REF, which can trap. */
2910 map.exp = exp;
2911 map.size = size;
2912 slot = m_seen_refs.find_slot (&map, INSERT);
2913 r2bb = *slot;
2914 if (r2bb && r2bb->phase >= nt_call_phase)
2915 found_bb = r2bb->bb;
2916
2917 /* If we've found a trapping *_REF, _and_ it dominates EXP
2918 (it's in a basic block on the path from us to the dominator root)
2919 then we can't trap. */
2920 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2921 {
2922 m_nontrapping->add (exp);
2923 }
2924 else
2925 {
2926 /* EXP might trap, so insert it into the hash table. */
2927 if (r2bb)
2928 {
2929 r2bb->phase = nt_call_phase;
2930 r2bb->bb = bb;
2931 }
2932 else
2933 {
2934 r2bb = XNEW (struct ref_to_bb);
2935 r2bb->phase = nt_call_phase;
2936 r2bb->bb = bb;
2937 r2bb->exp = exp;
2938 r2bb->size = size;
2939 *slot = r2bb;
2940 }
2941 }
2942 }
2943 }
2944
2945 /* This is the entry point for gathering non-trapping memory accesses.
2946 It will do a dominator walk over the whole function, and it will
2947 make use of the bb->aux pointers. It returns a set of trees
2948 (the MEM_REFs themselves) which can't trap. */
2949 static hash_set<tree> *
2950 get_non_trapping (void)
2951 {
2952 nt_call_phase = 0;
2953 hash_set<tree> *nontrap = new hash_set<tree>;
2954
2955 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2956 .walk (cfun->cfg->x_entry_block_ptr);
2957
2958 clear_aux_for_blocks ();
2959 return nontrap;
2960 }
2961
2962 /* Do the main work of conditional store replacement. We already know
2963 that the recognized pattern looks like so:
2964
2965 split:
2966 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2967 MIDDLE_BB:
2968 something
2969 fallthrough (edge E0)
2970 JOIN_BB:
2971 some more
2972
2973 We check that MIDDLE_BB contains only one store, that that store
2974 doesn't trap (not via NOTRAP, but via checking if an access to the same
2975 memory location dominates us, or the store is to a local addressable
2976 object) and that the store has a "simple" RHS. */
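
/* For example (a sketch of the effect, eliding the exact temporaries):

     if (cond)
       *p = x;

   becomes

     tmp = *p; // load inserted on the edge without the store
     tmp2 = PHI <x, tmp>;
     *p = tmp2; // now an unconditional store

   so later passes can turn the PHI into a conditional move. */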
2977
2978 static bool
2979 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
2980 edge e0, edge e1, hash_set<tree> *nontrap)
2981 {
2982 gimple *assign = last_and_only_stmt (middle_bb);
2983 tree lhs, rhs, name, name2;
2984 gphi *newphi;
2985 gassign *new_stmt;
2986 gimple_stmt_iterator gsi;
2987 location_t locus;
2988
2989 /* Check if middle_bb contains only one store. */
2990 if (!assign
2991 || !gimple_assign_single_p (assign)
2992 || gimple_has_volatile_ops (assign))
2993 return false;
2994
2995 /* And no PHI nodes so all uses in the single stmt are also
2996 available where we insert to. */
2997 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2998 return false;
2999
3000 locus = gimple_location (assign);
3001 lhs = gimple_assign_lhs (assign);
3002 rhs = gimple_assign_rhs1 (assign);
3003 if ((!REFERENCE_CLASS_P (lhs)
3004 && !DECL_P (lhs))
3005 || !is_gimple_reg_type (TREE_TYPE (lhs)))
3006 return false;
3007
3008 /* Prove that we can move the store down. We could also check
3009 TREE_THIS_NOTRAP here, but in that case we also could move stores,
3010 whose value is not available readily, which we want to avoid. */
3011 if (!nontrap->contains (lhs))
3012 {
3013 /* If LHS is an access to a local variable whose address is not taken
3014 (or when we allow data races) and known not to trap, we could
3015 always safely move down the store. */
3016 tree base = get_base_address (lhs);
3017 if (!auto_var_p (base)
3018 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
3019 || tree_could_trap_p (lhs))
3020 return false;
3021 }
3022
3023 /* Now we've checked the constraints, so do the transformation:
3024 1) Remove the single store. */
3025 gsi = gsi_for_stmt (assign);
3026 unlink_stmt_vdef (assign);
3027 gsi_remove (&gsi, true);
3028 release_defs (assign);
3029
3030 /* Make both store and load use alias-set zero as we have to
3031 deal with the case of the store being a conditional change
3032 of the dynamic type. */
3033 lhs = unshare_expr (lhs);
3034 tree *basep = &lhs;
3035 while (handled_component_p (*basep))
3036 basep = &TREE_OPERAND (*basep, 0);
3037 if (TREE_CODE (*basep) == MEM_REF
3038 || TREE_CODE (*basep) == TARGET_MEM_REF)
3039 TREE_OPERAND (*basep, 1)
3040 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
3041 else
3042 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
3043 build_fold_addr_expr (*basep),
3044 build_zero_cst (ptr_type_node));
3045
3046 /* 2) Insert a load from the memory of the store to the temporary
3047 on the edge which did not contain the store. */
3048 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3049 new_stmt = gimple_build_assign (name, lhs);
3050 gimple_set_location (new_stmt, locus);
3051 lhs = unshare_expr (lhs);
3052 {
3053 /* Set the no-warning bit on the rhs of the load to avoid uninit
3054 warnings. */
3055 tree rhs1 = gimple_assign_rhs1 (new_stmt);
3056 suppress_warning (rhs1, OPT_Wuninitialized);
3057 }
3058 gsi_insert_on_edge (e1, new_stmt);
3059
3060 /* 3) Create a PHI node at the join block, with one argument
3061 holding the old RHS, and the other holding the temporary
3062 where we stored the old memory contents. */
3063 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3064 newphi = create_phi_node (name2, join_bb);
3065 add_phi_arg (newphi, rhs, e0, locus);
3066 add_phi_arg (newphi, name, e1, locus);
3067
3068 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3069
3070 /* 4) Insert that PHI node. */
3071 gsi = gsi_after_labels (join_bb);
3072 if (gsi_end_p (gsi))
3073 {
3074 gsi = gsi_last_bb (join_bb);
3075 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3076 }
3077 else
3078 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3079
3080 if (dump_file && (dump_flags & TDF_DETAILS))
3081 {
3082 fprintf (dump_file, "\nConditional store replacement happened!");
3083 fprintf (dump_file, "\nReplaced the store with a load.");
3084 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
3085 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
3086 }
3087 statistics_counter_event (cfun, "conditional store replacement", 1);
3088
3089 return true;
3090 }
3091
3092 /* Do the main work of conditional store replacement. */
3093
3094 static bool
3095 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
3096 basic_block join_bb, gimple *then_assign,
3097 gimple *else_assign)
3098 {
3099 tree lhs_base, lhs, then_rhs, else_rhs, name;
3100 location_t then_locus, else_locus;
3101 gimple_stmt_iterator gsi;
3102 gphi *newphi;
3103 gassign *new_stmt;
3104
3105 if (then_assign == NULL
3106 || !gimple_assign_single_p (then_assign)
3107 || gimple_clobber_p (then_assign)
3108 || gimple_has_volatile_ops (then_assign)
3109 || else_assign == NULL
3110 || !gimple_assign_single_p (else_assign)
3111 || gimple_clobber_p (else_assign)
3112 || gimple_has_volatile_ops (else_assign))
3113 return false;
3114
3115 lhs = gimple_assign_lhs (then_assign);
3116 if (!is_gimple_reg_type (TREE_TYPE (lhs))
3117 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
3118 return false;
3119
3120 lhs_base = get_base_address (lhs);
3121 if (lhs_base == NULL_TREE
3122 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
3123 return false;
3124
3125 then_rhs = gimple_assign_rhs1 (then_assign);
3126 else_rhs = gimple_assign_rhs1 (else_assign);
3127 then_locus = gimple_location (then_assign);
3128 else_locus = gimple_location (else_assign);
3129
3130 /* Now we've checked the constraints, so do the transformation:
3131 1) Remove the stores. */
3132 gsi = gsi_for_stmt (then_assign);
3133 unlink_stmt_vdef (then_assign);
3134 gsi_remove (&gsi, true);
3135 release_defs (then_assign);
3136
3137 gsi = gsi_for_stmt (else_assign);
3138 unlink_stmt_vdef (else_assign);
3139 gsi_remove (&gsi, true);
3140 release_defs (else_assign);
3141
3142 /* 2) Create a PHI node at the join block, with one argument
3143 holding the old RHS, and the other holding the temporary
3144 where we stored the old memory contents. */
3145 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3146 newphi = create_phi_node (name, join_bb);
3147 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
3148 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
3149
3150 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3151
3152 /* 3) Insert that PHI node. */
3153 gsi = gsi_after_labels (join_bb);
3154 if (gsi_end_p (gsi))
3155 {
3156 gsi = gsi_last_bb (join_bb);
3157 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3158 }
3159 else
3160 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3161
3162 statistics_counter_event (cfun, "if-then-else store replacement", 1);
3163
3164 return true;
3165 }
3166
3167 /* Return the single store in BB with VDEF or NULL if there are
3168 other stores in the BB or loads following the store. */
3169
3170 static gimple *
3171 single_trailing_store_in_bb (basic_block bb, tree vdef)
3172 {
3173 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
3174 return NULL;
3175 gimple *store = SSA_NAME_DEF_STMT (vdef);
3176 if (gimple_bb (store) != bb
3177 || gimple_code (store) == GIMPLE_PHI)
3178 return NULL;
3179
3180 /* Verify there is no other store in this BB. */
3181 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
3182 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
3183 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
3184 return NULL;
3185
3186 /* Verify there is no load or store after the store. */
3187 use_operand_p use_p;
3188 imm_use_iterator imm_iter;
3189 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
3190 if (USE_STMT (use_p) != store
3191 && gimple_bb (USE_STMT (use_p)) == bb)
3192 return NULL;
3193
3194 return store;
3195 }
3196
3197 /* Conditional store replacement. We already know
3198 that the recognized pattern looks like so:
3199
3200 split:
3201 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
3202 THEN_BB:
3203 ...
3204 X = Y;
3205 ...
3206 goto JOIN_BB;
3207 ELSE_BB:
3208 ...
3209 X = Z;
3210 ...
3211 fallthrough (edge E0)
3212 JOIN_BB:
3213 some more
3214
3215 We check that it is safe to sink the store to JOIN_BB by verifying that
3216 there are no read-after-write or write-after-write dependencies in
3217 THEN_BB and ELSE_BB. */
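
/* For example (a sketch):

     if (cond)
       *p = a;
     else
       *p = b;

   becomes

     tmp = PHI <a, b>;
     *p = tmp; // single store sunk into JOIN_BB */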
3218
3219 static bool
3220 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
3221 basic_block join_bb)
3222 {
3223 vec<data_reference_p> then_datarefs, else_datarefs;
3224 vec<ddr_p> then_ddrs, else_ddrs;
3225 gimple *then_store, *else_store;
3226 bool found, ok = false, res;
3227 struct data_dependence_relation *ddr;
3228 data_reference_p then_dr, else_dr;
3229 int i, j;
3230 tree then_lhs, else_lhs;
3231 basic_block blocks[3];
3232
3233 /* Handle the case with a single store in THEN_BB and ELSE_BB. That is
3234 cheap enough to always handle, as it allows us to elide the dependence
3235 checking. */
3236 gphi *vphi = NULL;
3237 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
3238 gsi_next (&si))
3239 if (virtual_operand_p (gimple_phi_result (si.phi ())))
3240 {
3241 vphi = si.phi ();
3242 break;
3243 }
3244 if (!vphi)
3245 return false;
3246 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
3247 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
3248 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
3249 if (then_assign)
3250 {
3251 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
3252 if (else_assign)
3253 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3254 then_assign, else_assign);
3255 }
3256
3257 /* If either vectorization or if-conversion is disabled then do
3258 not sink any stores. */
3259 if (param_max_stores_to_sink == 0
3260 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
3261 || !flag_tree_loop_if_convert)
3262 return false;
3263
3264 /* Find data references. */
3265 then_datarefs.create (1);
3266 else_datarefs.create (1);
3267 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
3268 == chrec_dont_know)
3269 || !then_datarefs.length ()
3270 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
3271 == chrec_dont_know)
3272 || !else_datarefs.length ())
3273 {
3274 free_data_refs (then_datarefs);
3275 free_data_refs (else_datarefs);
3276 return false;
3277 }
3278
3279 /* Find pairs of stores with equal LHS. */
3280 auto_vec<gimple *, 1> then_stores, else_stores;
3281 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
3282 {
3283 if (DR_IS_READ (then_dr))
3284 continue;
3285
3286 then_store = DR_STMT (then_dr);
3287 then_lhs = gimple_get_lhs (then_store);
3288 if (then_lhs == NULL_TREE)
3289 continue;
3290 found = false;
3291
3292 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
3293 {
3294 if (DR_IS_READ (else_dr))
3295 continue;
3296
3297 else_store = DR_STMT (else_dr);
3298 else_lhs = gimple_get_lhs (else_store);
3299 if (else_lhs == NULL_TREE)
3300 continue;
3301
3302 if (operand_equal_p (then_lhs, else_lhs, 0))
3303 {
3304 found = true;
3305 break;
3306 }
3307 }
3308
3309 if (!found)
3310 continue;
3311
3312 then_stores.safe_push (then_store);
3313 else_stores.safe_push (else_store);
3314 }
3315
3316 /* No pairs of stores found. */
3317 if (!then_stores.length ()
3318 || then_stores.length () > (unsigned) param_max_stores_to_sink)
3319 {
3320 free_data_refs (then_datarefs);
3321 free_data_refs (else_datarefs);
3322 return false;
3323 }
3324
3325 /* Compute and check data dependencies in both basic blocks. */
3326 then_ddrs.create (1);
3327 else_ddrs.create (1);
3328 if (!compute_all_dependences (then_datarefs, &then_ddrs,
3329 vNULL, false)
3330 || !compute_all_dependences (else_datarefs, &else_ddrs,
3331 vNULL, false))
3332 {
3333 free_dependence_relations (then_ddrs);
3334 free_dependence_relations (else_ddrs);
3335 free_data_refs (then_datarefs);
3336 free_data_refs (else_datarefs);
3337 return false;
3338 }
3339 blocks[0] = then_bb;
3340 blocks[1] = else_bb;
3341 blocks[2] = join_bb;
3342 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
3343
3344 /* Check that there are no read-after-write or write-after-write dependencies
3345 in THEN_BB. */
3346 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
3347 {
3348 struct data_reference *dra = DDR_A (ddr);
3349 struct data_reference *drb = DDR_B (ddr);
3350
3351 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3352 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3353 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3354 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3355 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3356 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3357 {
3358 free_dependence_relations (then_ddrs);
3359 free_dependence_relations (else_ddrs);
3360 free_data_refs (then_datarefs);
3361 free_data_refs (else_datarefs);
3362 return false;
3363 }
3364 }
3365
3366 /* Check that there are no read-after-write or write-after-write dependencies
3367 in ELSE_BB. */
3368 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
3369 {
3370 struct data_reference *dra = DDR_A (ddr);
3371 struct data_reference *drb = DDR_B (ddr);
3372
3373 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3374 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3375 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3376 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3377 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3378 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3379 {
3380 free_dependence_relations (then_ddrs);
3381 free_dependence_relations (else_ddrs);
3382 free_data_refs (then_datarefs);
3383 free_data_refs (else_datarefs);
3384 return false;
3385 }
3386 }
3387
3388 /* Sink stores with same LHS. */
3389 FOR_EACH_VEC_ELT (then_stores, i, then_store)
3390 {
3391 else_store = else_stores[i];
3392 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3393 then_store, else_store);
3394 ok = ok || res;
3395 }
3396
3397 free_dependence_relations (then_ddrs);
3398 free_dependence_relations (else_ddrs);
3399 free_data_refs (then_datarefs);
3400 free_data_refs (else_datarefs);
3401
3402 return ok;
3403 }
3404
3405 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
3406
3407 static bool
3408 local_mem_dependence (gimple *stmt, basic_block bb)
3409 {
3410 tree vuse = gimple_vuse (stmt);
3411 gimple *def;
3412
3413 if (!vuse)
3414 return false;
3415
3416 def = SSA_NAME_DEF_STMT (vuse);
3417 return (def && gimple_bb (def) == bb);
3418 }
3419
3420 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
3421 BB1 and BB2 are "then" and "else" blocks dependent on this test,
3422 and BB3 rejoins control flow following BB1 and BB2, look for
3423 opportunities to hoist loads as follows. If BB3 contains a PHI of
3424 two loads, one each occurring in BB1 and BB2, and the loads are
3425 provably of adjacent fields in the same structure, then move both
3426 loads into BB0. Of course this can only be done if there are no
3427 dependencies preventing such motion.
3428
3429 One of the hoisted loads will always be speculative, so the
3430 transformation is currently conservative:
3431
3432 - The fields must be strictly adjacent.
3433 - The two fields must occupy a single memory block that is
3434 guaranteed to not cross a page boundary.
3435
3436 The last is difficult to prove, as such memory blocks should be
3437 aligned on the minimum of the stack alignment boundary and the
3438 alignment guaranteed by heap allocation interfaces. Thus we rely
3439 on a parameter for the alignment value.
3440
3441 Provided a good value is used for the last case, the first
3442 restriction could possibly be relaxed. */
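
/* For example (a sketch; the struct layout is hypothetical):

     struct s { int f1; int f2; };
     x = cond ? p->f1 : p->f2;

   Both loads are hoisted above the branch when f1 and f2 are adjacent
   and provably share one cache line, enabling a conditional move. */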
3443
3444 static void
3445 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
3446 basic_block bb2, basic_block bb3)
3447 {
3448 int param_align = param_l1_cache_line_size;
3449 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
3450 gphi_iterator gsi;
3451
3452 /* Walk the phis in bb3 looking for an opportunity. We are looking
3453 for phis of two SSA names, one each of which is defined in bb1 and
3454 bb2. */
3455 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
3456 {
3457 gphi *phi_stmt = gsi.phi ();
3458 gimple *def1, *def2;
3459 tree arg1, arg2, ref1, ref2, field1, field2;
3460 tree tree_offset1, tree_offset2, tree_size2, next;
3461 int offset1, offset2, size2;
3462 unsigned align1;
3463 gimple_stmt_iterator gsi2;
3464 basic_block bb_for_def1, bb_for_def2;
3465
3466 if (gimple_phi_num_args (phi_stmt) != 2
3467 || virtual_operand_p (gimple_phi_result (phi_stmt)))
3468 continue;
3469
3470 arg1 = gimple_phi_arg_def (phi_stmt, 0);
3471 arg2 = gimple_phi_arg_def (phi_stmt, 1);
3472
3473 if (TREE_CODE (arg1) != SSA_NAME
3474 || TREE_CODE (arg2) != SSA_NAME
3475 || SSA_NAME_IS_DEFAULT_DEF (arg1)
3476 || SSA_NAME_IS_DEFAULT_DEF (arg2))
3477 continue;
3478
3479 def1 = SSA_NAME_DEF_STMT (arg1);
3480 def2 = SSA_NAME_DEF_STMT (arg2);
3481
3482 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
3483 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
3484 continue;
3485
3486 /* Check the mode of the arguments to be sure a conditional move
3487 can be generated for it. */
3488 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
3489 == CODE_FOR_nothing)
3490 continue;
3491
3492 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
3493 if (!gimple_assign_single_p (def1)
3494 || !gimple_assign_single_p (def2)
3495 || gimple_has_volatile_ops (def1)
3496 || gimple_has_volatile_ops (def2))
3497 continue;
3498
3499 ref1 = gimple_assign_rhs1 (def1);
3500 ref2 = gimple_assign_rhs1 (def2);
3501
3502 if (TREE_CODE (ref1) != COMPONENT_REF
3503 || TREE_CODE (ref2) != COMPONENT_REF)
3504 continue;
3505
3506 /* The zeroth operand of the two component references must be
3507 identical. It is not sufficient to compare get_base_address of
3508 the two references, because this could allow for different
3509 elements of the same array in the two trees. It is not safe to
3510 assume that the existence of one array element implies the
3511 existence of a different one. */
3512 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
3513 continue;
3514
3515 field1 = TREE_OPERAND (ref1, 1);
3516 field2 = TREE_OPERAND (ref2, 1);
3517
3518 /* Check for field adjacency, and ensure field1 comes first. */
3519 for (next = DECL_CHAIN (field1);
3520 next && TREE_CODE (next) != FIELD_DECL;
3521 next = DECL_CHAIN (next))
3522 ;
3523
3524 if (next != field2)
3525 {
3526 for (next = DECL_CHAIN (field2);
3527 next && TREE_CODE (next) != FIELD_DECL;
3528 next = DECL_CHAIN (next))
3529 ;
3530
3531 if (next != field1)
3532 continue;
3533
3534 std::swap (field1, field2);
3535 std::swap (def1, def2);
3536 }
3537
3538 bb_for_def1 = gimple_bb (def1);
3539 bb_for_def2 = gimple_bb (def2);
3540
3541 /* Check for proper alignment of the first field. */
3542 tree_offset1 = bit_position (field1);
3543 tree_offset2 = bit_position (field2);
3544 tree_size2 = DECL_SIZE (field2);
3545
3546 if (!tree_fits_uhwi_p (tree_offset1)
3547 || !tree_fits_uhwi_p (tree_offset2)
3548 || !tree_fits_uhwi_p (tree_size2))
3549 continue;
3550
3551 offset1 = tree_to_uhwi (tree_offset1);
3552 offset2 = tree_to_uhwi (tree_offset2);
3553 size2 = tree_to_uhwi (tree_size2);
3554 align1 = DECL_ALIGN (field1) % param_align_bits;
3555
3556 if (offset1 % BITS_PER_UNIT != 0)
3557 continue;
3558
3559 /* For profitability, the two field references should fit within
3560 a single cache line. */
3561 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3562 continue;
3563
3564 /* The two expressions cannot be dependent upon vdefs defined
3565 in bb1/bb2. */
3566 if (local_mem_dependence (def1, bb_for_def1)
3567 || local_mem_dependence (def2, bb_for_def2))
3568 continue;
3569
3570 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3571 bb0. We hoist the first one first so that a cache miss is handled
3572 efficiently regardless of hardware cache-fill policy. */
3573 gsi2 = gsi_for_stmt (def1);
3574 gsi_move_to_bb_end (&gsi2, bb0);
3575 gsi2 = gsi_for_stmt (def2);
3576 gsi_move_to_bb_end (&gsi2, bb0);
3577 statistics_counter_event (cfun, "hoisted loads", 1);
3578
3579 if (dump_file && (dump_flags & TDF_DETAILS))
3580 {
3581 fprintf (dump_file,
3582 "\nHoisting adjacent loads from %d and %d into %d: \n",
3583 bb_for_def1->index, bb_for_def2->index, bb0->index);
3584 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3585 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3586 }
3587 }
3588 }
3589
3590 /* Determine whether we should attempt to hoist adjacent loads out of
3591 diamond patterns in pass_phiopt. Always hoist loads if
3592 -fhoist-adjacent-loads is specified and the target machine has
3593 both a conditional move instruction and a defined cache line size. */
3594
3595 static bool
3596 gate_hoist_loads (void)
3597 {
3598 return (flag_hoist_adjacent_loads == 1
3599 && param_l1_cache_line_size
3600 && HAVE_conditional_move);
3601 }
3602
3603 /* This pass tries to replace an if-then-else block with an
3604 assignment. We have four kinds of transformations. Some of these
3605 transformations are also performed by the ifcvt RTL optimizer.
3606
3607 Conditional Replacement
3608 -----------------------
3609
3610 This transformation, implemented in match_simplify_replacement,
3611 replaces
3612
3613 bb0:
3614 if (cond) goto bb2; else goto bb1;
3615 bb1:
3616 bb2:
3617 x = PHI <0 (bb1), 1 (bb0), ...>;
3618
3619 with
3620
3621 bb0:
3622 x' = cond;
3623 goto bb2;
3624 bb2:
3625 x = PHI <x' (bb0), ...>;
3626
3627 We remove bb1 as it becomes unreachable. This occurs often due to
3628 gimplification of conditionals.
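
   A hypothetical source-level form that gimplifies into this shape
   (illustrative only, not taken from any testcase):

     _Bool f (int c) { return c ? 1 : 0; }

   Here the PHI of 0 and 1 collapses into a direct test of the
   condition.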
3629
3630 Value Replacement
3631 -----------------
3632
3633 This transformation, implemented in value_replacement, replaces
3634
3635 bb0:
3636 if (a != b) goto bb2; else goto bb1;
3637 bb1:
3638 bb2:
3639 x = PHI <a (bb1), b (bb0), ...>;
3640
3641 with
3642
3643 bb0:
3644 bb2:
3645 x = PHI <b (bb0), ...>;
3646
3647 This opportunity can sometimes occur as a result of other
3648 optimizations.
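
   A hypothetical C-level form (illustrative only):

     int f (int a, int b) { return a != b ? b : a; }

   On the edge where a == b the PHI yields a, which equals b there, so
   the result is b on every path and the branch can be removed.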
3649
3650
3651 Another case caught by value replacement looks like this:
3652
3653 bb0:
3654 t1 = a == CONST;
3655 t2 = b > c;
3656 t3 = t1 & t2;
3657 if (t3 != 0) goto bb1; else goto bb2;
3658 bb1:
3659 bb2:
3660 x = PHI <CONST, a>
3661
3662 This gets replaced with:
3663 bb0:
3664 bb2:
3665 t1 = a == CONST;
3666 t2 = b > c;
3667 t3 = t1 & t2;
3668 x = a;
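
   A hypothetical source form for this case (illustrative only):

     int f (int a, int b, int c) { return (a == 5 && b > c) ? 5 : a; }

   On the path where the condition holds, a is known to equal 5, so
   both PHI arguments are equivalent to a and the conditional becomes
   dead.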
3669
3670 ABS Replacement
3671 ---------------
3672
3673 This transformation, implemented in abs_replacement, replaces
3674
3675 bb0:
3676 if (a >= 0) goto bb2; else goto bb1;
3677 bb1:
3678 x = -a;
3679 bb2:
3680 x = PHI <x (bb1), a (bb0), ...>;
3681
3682 with
3683
3684 bb0:
3685 x' = ABS_EXPR< a >;
3686 bb2:
3687 x = PHI <x' (bb0), ...>;
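
   The canonical source form (illustrative only):

     int f (int a) { return a >= 0 ? a : -a; }

   becomes a single ABS_EXPR with no control flow.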
3688
3689 MIN/MAX Replacement
3690 -------------------
3691
3692 This transformation, implemented in minmax_replacement, replaces
3693
3694 bb0:
3695 if (a <= b) goto bb2; else goto bb1;
3696 bb1:
3697 bb2:
3698 x = PHI <b (bb1), a (bb0), ...>;
3699
3700 with
3701
3702 bb0:
3703 x' = MIN_EXPR (a, b)
3704 bb2:
3705 x = PHI <x' (bb0), ...>;
3706
3707 A similar transformation is done for MAX_EXPR.
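
   E.g. the hypothetical source forms (illustrative only)

     int min (int a, int b) { return a <= b ? a : b; }
     int max (int a, int b) { return a >= b ? a : b; }

   become MIN_EXPR <a, b> and MAX_EXPR <a, b> respectively.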
3708
3709
3710 This pass also performs two further transformations of a slightly
3711 different flavor.
3712
3713 Factor conversion in COND_EXPR
3714 ------------------------------
3715
3716 This transformation factors the conversion out of COND_EXPR with
3717 factor_out_conditional_conversion.
3718
3719 For example:
3720 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3721 <bb 3>:
3722 tmp = (int) a;
3723 <bb 4>:
3724 tmp = PHI <tmp, CST>
3725
3726 Into:
3727 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3728 <bb 3>:
3729 <bb 4>:
3730 a = PHI <a, CST>
3731 tmp = (int) a;
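
   As a hypothetical source form (illustrative only):

     int f (short a) { return a <= 4 ? (int) a : 4; }

   Once the widening conversion is factored out of the arms, the
   remaining PHI <a, 4> has exactly the shape that the MIN/MAX
   replacement above can turn into MIN_EXPR <a, 4>.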
3732
3733 Adjacent Load Hoisting
3734 ----------------------
3735
3736 This transformation replaces
3737
3738 bb0:
3739 if (...) goto bb2; else goto bb1;
3740 bb1:
3741 x1 = (<expr>).field1;
3742 goto bb3;
3743 bb2:
3744 x2 = (<expr>).field2;
3745 bb3:
3746 # x = PHI <x1, x2>;
3747
3748 with
3749
3750 bb0:
3751 x1 = (<expr>).field1;
3752 x2 = (<expr>).field2;
3753 if (...) goto bb2; else goto bb1;
3754 bb1:
3755 goto bb3;
3756 bb2:
3757 bb3:
3758 # x = PHI <x1, x2>;
3759
3760 The purpose of this transformation is to enable generation of conditional
3761 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
3762 the loads is speculative, the transformation is restricted to very
3763 specific cases to avoid introducing a page fault. We are looking for
3764 the common idiom:
3765
3766 if (...)
3767 x = y->left;
3768 else
3769 x = y->right;
3770
3771 where left and right are typically adjacent pointers in a tree structure. */
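
/* For the idiom above, an illustrative (hypothetical) layout is

     struct node { struct node *left, *right; };

   where the two pointer fields fall within one cache line whenever
   the structure is suitably aligned, so the speculative second load
   cannot fault and is nearly free.  */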
3772
3773 namespace {
3774
3775 const pass_data pass_data_phiopt =
3776 {
3777 GIMPLE_PASS, /* type */
3778 "phiopt", /* name */
3779 OPTGROUP_NONE, /* optinfo_flags */
3780 TV_TREE_PHIOPT, /* tv_id */
3781 ( PROP_cfg | PROP_ssa ), /* properties_required */
3782 0, /* properties_provided */
3783 0, /* properties_destroyed */
3784 0, /* todo_flags_start */
3785 0, /* todo_flags_finish */
3786 };
3787
3788 class pass_phiopt : public gimple_opt_pass
3789 {
3790 public:
3791 pass_phiopt (gcc::context *ctxt)
3792 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3793 {}
3794
3795 /* opt_pass methods: */
3796 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3797 void set_pass_param (unsigned n, bool param)
3798 {
3799 gcc_assert (n == 0);
3800 early_p = param;
3801 }
3802 virtual bool gate (function *) { return flag_ssa_phiopt; }
3803 virtual unsigned int execute (function *)
3804 {
3805 return tree_ssa_phiopt_worker (false,
3806 !early_p ? gate_hoist_loads () : false,
3807 early_p);
3808 }
3809
3810 private:
3811 bool early_p;
3812 }; // class pass_phiopt
3813
3814 } // anon namespace
3815
3816 gimple_opt_pass *
3817 make_pass_phiopt (gcc::context *ctxt)
3818 {
3819 return new pass_phiopt (ctxt);
3820 }
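
/* pass_phiopt is instantiated more than once (see passes.def): clone ()
   supplies the extra instances, and set_pass_param sets early_p for
   each of them, roughly along these lines (illustrative sketch):

     NEXT_PASS (pass_phiopt, true);     early instance
     ...
     NEXT_PASS (pass_phiopt, false);    late instance

   As the execute method shows, only the late instance may hoist
   adjacent loads.  */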
3821
3822 namespace {
3823
3824 const pass_data pass_data_cselim =
3825 {
3826 GIMPLE_PASS, /* type */
3827 "cselim", /* name */
3828 OPTGROUP_NONE, /* optinfo_flags */
3829 TV_TREE_PHIOPT, /* tv_id */
3830 ( PROP_cfg | PROP_ssa ), /* properties_required */
3831 0, /* properties_provided */
3832 0, /* properties_destroyed */
3833 0, /* todo_flags_start */
3834 0, /* todo_flags_finish */
3835 };
3836
3837 class pass_cselim : public gimple_opt_pass
3838 {
3839 public:
3840 pass_cselim (gcc::context *ctxt)
3841 : gimple_opt_pass (pass_data_cselim, ctxt)
3842 {}
3843
3844 /* opt_pass methods: */
3845 virtual bool gate (function *) { return flag_tree_cselim; }
3846 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3847
3848 }; // class pass_cselim
3849
3850 } // anon namespace
3851
3852 gimple_opt_pass *
3853 make_pass_cselim (gcc::context *ctxt)
3854 {
3855 return new pass_cselim (ctxt);
3856 }