/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "params.h"
#include "case-cfn-macros.h"

static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
static bool conditional_replacement (basic_block, basic_block,
				     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
						gimple *);
static int value_replacement (basic_block, basic_block,
			      edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
				edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
			     edge, edge, gimple *, tree, tree);
static bool cond_removal_in_popcount_pattern (basic_block, basic_block,
					      edge, edge, gimple *, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
				    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
				  basic_block, basic_block);
static bool gate_hoist_loads (void);

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
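
/* At the source level, the first shape above typically comes from code
   like the following (a sketch; the exact GIMPLE depends on options and
   target):

     void
     f (int *p, int rhs, int cond)
     {
       if (cond)
	 *p = rhs;
     }

   When *p is known not to trap, the store is made unconditional: the
   then-block loads *p, a PHI merges RHS with the loaded value, and a
   single store of the PHI result replaces the conditional store.  */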

static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}

/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* If we already found a PHI whose arguments on the two edges
	 differ, there is more than one such PHI; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}
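
/* For example (a sketch; the SSA names are made up), given the
   join-block PHIs

     a_1 = PHI <x_5(3), x_5(4)>
     b_2 = PHI <y_6(3), z_7(4)>

   a_1 has equal arguments on both edges and is skipped, so b_2 is the
   single "non-singleton" PHI that is returned; if a second PHI also had
   differing arguments, the function would return NULL.  */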

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  EARLY_P is true
   when we are invoked early in the pass pipeline; value replacement,
   conditional replacement and the popcount transform are then
   skipped.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
	  || gimple_code (cond_stmt) != GIMPLE_COND)
	continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
	  || (e2->flags & EDGE_ABNORMAL) != 0)
	continue;

      /* Give up if bb1 has no successors, bb2 is missing, or bb2 has
	 no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
	  || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
	continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
	;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
	{
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
	continue;

      /* Also make sure that bb1 has only one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
	  || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first.  */
	  if (!early_p)
	    for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	      {
		phi = as_a <gphi *> (gsi_stmt (gsi));
		arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
		arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
		if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		  {
		    candorest = false;
		    cfgchanged = true;
		    break;
		  }
	      }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;

	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

	  gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
							    arg0, arg1,
							    cond_stmt);
	  if (newphi != NULL)
	    {
	      phi = newphi;
	      /* factor_out_conditional_conversion may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
	    }

	  /* Do the replacement of conditional if it can be done.  */
	  if (!early_p
	      && conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (!early_p
		   && cond_removal_in_popcount_pattern (bb, bb1, e1, e2,
							phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
	 and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
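
/* The worker above matches one of two CFG shapes (a sketch):

     triangle (phi opts,            diamond (if-else store
      conditional store)             replacement, load hoisting)

	  bb                             bb
	 /  \                           /  \
       bb1   |                        bb1   bb2
	 \   |                          \   /
	  bb2                            bb3

   BB ends in a GIMPLE_COND, the middle block(s) hold the conditional
   code, and the bottom block holds the join PHIs that the individual
   replacement routines operate on.  */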

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}

/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are conversions, factor the conversion out and apply it to the
   result of the PHI stmt instead.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */

static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
				   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  source_location locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
	return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
	return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    {
	      /* For the INTEGER_CST case, we are just moving the
		 conversion from one place to another, which can often
		 hurt as the conversion moves further away from the
		 statement that computes the value.  So, perform this
		 only if new_arg0 is an operand of COND_STMT, or
		 if arg0_def_stmt is the only non-debug stmt in
		 its basic block, because then it is possible this
		 could enable further optimizations (minmax replacement
		 etc.).  See PR71016.  */
	      if (new_arg0 != gimple_cond_lhs (cond_stmt)
		  && new_arg0 != gimple_cond_rhs (cond_stmt)
		  && gimple_bb (arg0_def_stmt) == e0->src)
		{
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_prev_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_next_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		}
	      new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
	    }
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different bailout.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
	       " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that has single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return newphi;
}
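
/* As a source-level illustration (a sketch; the names are made up),
   factoring turns

     x = cond ? (long) a : (long) b;

   into

     tmp = cond ? a : b;
     x = (long) tmp;

   so the conversion is emitted once after the merge instead of once per
   arm, which also exposes the plain PHI <a, b> to the min/max and other
   replacements below.  */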

/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
			 edge e0, edge e1, gphi *phi,
			 tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	|| POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	   || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments have the constants 0 and 1, or 0 and -1, then
     we can convert the PHI into the condition itself (possibly negated).  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
	   || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
			  gimple_cond_code (stmt), boolean_type_node,
			  gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
			    TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
			       TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
			      NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
				      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
	locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}
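
/* For example (a sketch at the source level), conditional_replacement
   turns

     r = a < b ? 1 : 0;

   into the comparison itself, r = a < b; with the (0, -1) argument pair
   it emits the negated form r = -(a < b) instead, per the NEG flag
   computed above.  */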

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && known_eq (mem_ref_offset (tem) + offset, 0))
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
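
/* For instance (a sketch), given "struct S { int i; } *p", the
   statement arg_2 = &p_1->i takes the address of a member at offset
   zero, so the MEM_REF check above succeeds, *ARG is replaced by p_1,
   and the address computation becomes dead.  */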

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of an
   EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
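
/* As an illustration (a sketch in GIMPLE-like form), the "more complex
   case" above matches

     _1 = a_2 == b_3;
     _4 = _1 & c_5;
     if (_4 != 0) goto ...;

   so a PHI <a_2, b_3> at the join is still a value replacement
   candidate, with *CODE updated to EQ_EXPR.  */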

/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
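
/* Concretely: x + 0, x | 0, x ^ 0, x * 1 and x & -1 all yield x with
   the constant on either side, while x - 0, x << 0, x >> 0 and x / 1
   yield x only when the constant is the RIGHT operand (0 - x and
   0 << x are not x), which is what the RIGHT parameter distinguishes.  */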

/* Returns true if ARG is an absorbing element for operation CODE.  */

static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
	      && integer_zerop (arg)
	      && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}
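
/* Concretely: x | -1 is always -1, and x * 0 and x & 0 are always 0,
   with the constant on either side.  0 << x, 0 >> x and the rotates
   absorb only when 0 is the LEFT operand, and 0 / x and 0 % x
   additionally require the other operand to be provably nonzero, since
   e.g. 0 / x only folds to 0 for x != 0.  */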

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gimple *phi,
		   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  if (gimple_code (stmt) != GIMPLE_PREDICT
	      && gimple_code (stmt) != GIMPLE_NOP)
	    empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or only defines the PHI
	 arguments, and this is the single PHI whose arguments differ
	 for the edges e0 and e1, then we can remove the middle basic
	 block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	  /* Note that we optimized this PHI.  */
	  return 2;
	}
      else
	{
	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi));
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}

    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
	goto <bb 3>;
      else
	goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
	goto <bb 4>;
      else
	goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
	break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
	break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
	return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
	  || TREE_CODE (rhs1) != SSA_NAME
	  || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  || !single_imm_use (lhs, &use_p, &use_stmt)
	  || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
	return 0;
      switch (gimple_assign_rhs_code (g))
	{
	CASE_CONVERT:
	  break;
	case PLUS_EXPR:
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
	    return 0;
	  break;
	default:
	  return 0;
	}
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (bb_seq (middle_bb), &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
	return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
	{
	  cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
				      gimple_assign_rhs2 (g));
	  if (TREE_OVERFLOW (cond_rhs))
	    return 0;
	}
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
	   && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	   && neutral_element_p (code_def, cond_rhs, true))
	  || (arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
		  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
		      && absorbing_element_p (code_def,
					      cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	 def-stmt in:
	 if (n_5 != 0)
	   goto <bb 3>;
	 else
	   goto <bb 4>;

	 <bb 3>:
	 # RANGE [0, 4294967294]
	 u_6 = n_5 + 4294967295;

	 <bb 4>:
	 # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
	{
	  /* If available, we can use VR of phi result at least.  */
	  tree phires = gimple_phi_result (phi);
	  struct range_info_def *phires_range_info
	    = SSA_NAME_RANGE_INFO (phires);
	  if (phires_range_info)
	    duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
					   phires_range_info);
	}
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
	{
	  tree plhs = gimple_assign_lhs (prep_stmt[i]);
	  reset_flow_sensitive_info (plhs);
	  gsi_from = gsi_for_stmt (prep_stmt[i]);
	  gsi_move_before (&gsi_from, &gsi);
	}
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
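
/* Source-level illustrations of value_replacement (sketches):

     r = (a == 5) ? 5 : a;      becomes   r = a;
     r = (x != 0) ? x + y : y;  becomes   r = x + y;

   the first via the equality-argument match above, the second via the
   neutral-element path (0 is neutral for plus on either side).  */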

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
		    edge e0, edge e1, gimple *phi,
		    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  alt_smaller = NULL_TREE;
  alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
	 Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST)
	{
	  if (cmp == LT_EXPR)
	    {
	      wi::overflow_type overflow;
	      wide_int alt = wi::sub (wi::to_wide (larger), 1,
				      TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	  else
	    {
	      wi::overflow_type overflow;
	      wide_int alt = wi::add (wi::to_wide (larger), 1,
				      TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	}
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
	 Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST)
	{
	  wi::overflow_type overflow;
	  if (cmp == GT_EXPR)
	    {
	      wide_int alt = wi::add (wi::to_wide (smaller), 1,
				      TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	  else
	    {
	      wide_int alt = wi::sub (wi::to_wide (smaller), 1,
				      TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	}
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know how the PHI arguments map onto the min/max
     operands.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
	   || (alt_smaller
	       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	  && (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
	{
	  /* Case

	     if (smaller < larger)
	       rslt = smaller;
	     else
	       rslt = larger;  */
	  minmax = MIN_EXPR;
	}
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
		|| (alt_smaller
		    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	       && (operand_equal_for_phi_arg_p (arg_true, larger)
		   || (alt_larger
		       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
	minmax = MAX_EXPR;
      else
	return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

	 if (a <= u)
	   b = MAX (a, d);
	 x = PHI <b, u>

	 This is equivalent to

	 b = MAX (a, d);
	 x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
	  || gimple_code (assign) != GIMPLE_ASSIGN)
	return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
	return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
	{
	  /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_true))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MAX_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_false, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MIN_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger)
		  || (alt_larger
		      && operand_equal_for_phi_arg_p (op0, alt_larger)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger)
		       || (alt_larger
			   && operand_equal_for_phi_arg_p (op1, alt_larger)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}
      else
	{
	  /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_false))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_true, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MIN_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_true, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MAX_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
							  SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Create an SSA var to hold the min/max result.  If we're the only
     things setting the target PHI, then we can clone the PHI
     variable.  Otherwise we must create a new one.  */
  result = PHI_RESULT (phi);
  if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
    result = duplicate_ssa_name (result, NULL);
  else
    result = make_ssa_name (TREE_TYPE (result));

  /* Emit the statement to compute min/max.  */
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}
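
/* For example (a sketch), the empty-middle-block case turns

     r = (a < b) ? a : b;

   into r = MIN_EXPR <a, b>, and the one-statement case from the comment
   above composes two of them:

     if (a <= u)
       b = MAX (a, d);
     x = PHI <b, u>

   becomes b = MAX_EXPR <a, d>; x = MIN_EXPR <b, u>; provided d <= u.  */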

/* Convert

   <bb 2>
   if (b_4(D) != 0)
     goto <bb 3>
   else
     goto <bb 4>

   <bb 3>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <0(2), _9(3)>

   Into
   <bb 2>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <_9(2)>
*/

static bool
cond_removal_in_popcount_pattern (basic_block cond_bb, basic_block middle_bb,
				  edge e1, edge e2,
				  gimple *phi, tree arg0, tree arg1)
{
  gimple *cond;
  gimple_stmt_iterator gsi, gsi_from;
  gimple *popcount;
  gimple *cast = NULL;
  tree lhs, arg;

  /* Check that
     _2 = (unsigned long) b_4(D);
     _9 = __builtin_popcountl (_2);
     OR
     _9 = __builtin_popcountl (b_4(D));
     are the only stmts in the middle_bb.  */

  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  if (gsi_end_p (gsi))
    return false;
  cast = gsi_stmt (gsi);
  gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    {
      popcount = gsi_stmt (gsi);
      gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	return false;
    }
  else
    {
      popcount = cast;
      cast = NULL;
    }

  /* Check that we have a popcount builtin.  */
  if (!is_gimple_call (popcount))
    return false;
  combined_fn cfn = gimple_call_combined_fn (popcount);
  switch (cfn)
    {
    CASE_CFN_POPCOUNT:
      break;
    default:
      return false;
    }

  arg = gimple_call_arg (popcount, 0);
  lhs = gimple_get_lhs (popcount);

  if (cast)
    {
      /* We have a cast stmt feeding popcount builtin.  */
      /* Check that we have a cast prior to that.  */
      if (gimple_code (cast) != GIMPLE_ASSIGN
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
	return false;
      /* Result of the cast stmt is the argument to the builtin.  */
      if (arg != gimple_assign_lhs (cast))
	return false;
      arg = gimple_assign_rhs1 (cast);
    }

  cond = last_stmt (cond_bb);

  /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount
     builtin.  */
  if (gimple_code (cond) != GIMPLE_COND
      || (gimple_cond_code (cond) != NE_EXPR
	  && gimple_cond_code (cond) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond))
      || arg != gimple_cond_lhs (cond))
    return false;

  /* Canonicalize.  */
  if ((e2->flags & EDGE_TRUE_VALUE
       && gimple_cond_code (cond) == NE_EXPR)
      || (e1->flags & EDGE_TRUE_VALUE
	  && gimple_cond_code (cond) == EQ_EXPR))
    {
      std::swap (arg0, arg1);
      std::swap (e1, e2);
    }

  /* Check PHI arguments.  */
  if (lhs != arg0 || !integer_zerop (arg1))
    return false;

  /* And insert the popcount builtin and cast stmt before the cond_bb.  */
  gsi = gsi_last_bb (cond_bb);
  if (cast)
    {
      gsi_from = gsi_for_stmt (cast);
      gsi_move_before (&gsi_from, &gsi);
      reset_flow_sensitive_info (gimple_get_lhs (cast));
    }
  gsi_from = gsi_for_stmt (popcount);
  gsi_move_before (&gsi_from, &gsi);
  reset_flow_sensitive_info (gimple_get_lhs (popcount));

  /* Now update the PHI and remove unneeded bbs.  */
  replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
  return true;
}
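
/* At the source level this removes the guard programmers often place
   around popcount (a sketch):

     int
     f (unsigned long b)
     {
       return b ? __builtin_popcountl (b) : 0;
     }

   The test is redundant because __builtin_popcountl (0) is 0, so the
   call is made unconditional and the PHI collapses.  */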

/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done on.  arg0
   is argument 0 from the phi.  Likewise for arg1.  */

70512b93 1670static bool
33784d89 1671abs_replacement (basic_block cond_bb, basic_block middle_bb,
a4844041 1672 edge e0 ATTRIBUTE_UNUSED, edge e1,
42acab1c 1673 gimple *phi, tree arg0, tree arg1)
70512b93 1674{
1675 tree result;
1a91d914 1676 gassign *new_stmt;
42acab1c 1677 gimple *cond;
75a70cf9 1678 gimple_stmt_iterator gsi;
70512b93 1679 edge true_edge, false_edge;
42acab1c 1680 gimple *assign;
70512b93 1681 edge e;
194899bf 1682 tree rhs, lhs;
70512b93 1683 bool negate;
1684 enum tree_code cond_code;
1685
1686 /* If the type says honor signed zeros we cannot do this
dac49aa5 1687 optimization. */
fe994837 1688 if (HONOR_SIGNED_ZEROS (arg1))
70512b93 1689 return false;
1690
70512b93 1691 /* MIDDLE_BB must have only one executable statement which must have the
 1692 form arg0 = -arg1 or arg1 = -arg0. */
70512b93 1693
194899bf 1694 assign = last_and_only_stmt (middle_bb);
70512b93 1695 /* If we did not find the proper negation assignment, then we cannot
1696 optimize. */
1697 if (assign == NULL)
1698 return false;
48e1416a 1699
194899bf 1700 /* If we got here, then we have found the only executable statement
 1701 in MIDDLE_BB. If it is anything other than arg0 = -arg1 or
 1702 arg1 = -arg0, then we cannot optimize. */
75a70cf9 1703 if (gimple_code (assign) != GIMPLE_ASSIGN)
194899bf 1704 return false;
1705
75a70cf9 1706 lhs = gimple_assign_lhs (assign);
194899bf 1707
75a70cf9 1708 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
194899bf 1709 return false;
1710
75a70cf9 1711 rhs = gimple_assign_rhs1 (assign);
48e1416a 1712
194899bf 1713 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1714 if (!(lhs == arg0 && rhs == arg1)
1715 && !(lhs == arg1 && rhs == arg0))
1716 return false;
70512b93 1717
75a70cf9 1718 cond = last_stmt (cond_bb);
70512b93 1719 result = PHI_RESULT (phi);
1720
1721 /* Only relationals comparing arg[01] against zero are interesting. */
75a70cf9 1722 cond_code = gimple_cond_code (cond);
70512b93 1723 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1724 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1725 return false;
1726
dac49aa5 1727 /* Make sure the conditional is arg[01] OP y. */
75a70cf9 1728 if (gimple_cond_lhs (cond) != rhs)
70512b93 1729 return false;
1730
75a70cf9 1731 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1732 ? real_zerop (gimple_cond_rhs (cond))
1733 : integer_zerop (gimple_cond_rhs (cond)))
70512b93 1734 ;
1735 else
1736 return false;
1737
1738 /* We need to know which is the true edge and which is the false
 1739 edge so that we know if we have abs or negative abs. */
33784d89 1740 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
70512b93 1741
 1742 /* For GT_EXPR/GE_EXPR, if the true edge goes to MIDDLE_BB, then we
 1743 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
 1744 the false edge goes to MIDDLE_BB. */
1745 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1746 e = true_edge;
1747 else
1748 e = false_edge;
20e5647c 1749
33784d89 1750 if (e->dest == middle_bb)
70512b93 1751 negate = true;
1752 else
1753 negate = false;
20e5647c 1754
57b2dea1 1755 /* If the code negates only if positive then make sure not to
1756 introduce undefined behavior when negating or computing the absolute.
1757 ??? We could use range info if present to check for arg1 == INT_MIN. */
1758 if (negate
1759 && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
1760 && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
1761 return false;
1762
33784d89 1763 result = duplicate_ssa_name (result, NULL);
20e5647c 1764
70512b93 1765 if (negate)
f9e245b2 1766 lhs = make_ssa_name (TREE_TYPE (result));
70512b93 1767 else
1768 lhs = result;
1769
dac49aa5 1770 /* Build the modify expression with abs expression. */
e9cf809e 1771 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
70512b93 1772
75a70cf9 1773 gsi = gsi_last_bb (cond_bb);
1774 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
70512b93 1775
1776 if (negate)
1777 {
75a70cf9 1778 /* Get the right GSI. We want to insert after the recently
70512b93 1779 added ABS_EXPR statement (which we know is the first statement
 1780 in the block). */
e9cf809e 1781 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
70512b93 1782
75a70cf9 1783 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
70512b93 1784 }
20e5647c 1785
a4844041 1786 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
70512b93 1787
1788 /* Note that we optimized this PHI. */
1789 return true;
1790}
1791
e6d0e152 1792/* Auxiliary functions to determine the set of memory accesses which
1793 can't trap because they are preceded by accesses to the same memory
182cf5a9 1794 portion. We do that for MEM_REFs, so we only need to track
e6d0e152 1795 the SSA_NAME of the pointer indirectly referenced. The algorithm
1796 simply is a walk over all instructions in dominator order. When
182cf5a9 1797 we see a MEM_REF we determine if we've already seen the same
e6d0e152 1798 ref anywhere up to the root of the dominator tree. If we have, the
af4f74fa 1799 current access can't trap. If we don't see any dominating access
e6d0e152 1800 the current access might trap, but might also make later accesses
af4f74fa 1801 non-trapping, so we remember it. We need to be careful with loads
1802 or stores, for instance a load might not trap, while a store would,
1803 so if we see a dominating read access this doesn't mean that a later
1804 write access would not trap. Hence we also need to differentiate the
1805 type of access(es) seen.
1806
1807 ??? We currently are very conservative and assume that a load might
1808 trap even if a store doesn't (write-only memory). This probably is
1809 overly conservative. */
e6d0e152 1810
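/* For instance (a hypothetical fragment), given

     *p_1 = x_2;      <-- store through p_1 recorded here
     ...
     y_3 = *p_1;      <-- dominated by the store above

   the load cannot trap, since the dominating store to the same bytes
   would already have trapped. Per the above, the reverse inference
   (a dominating load making a later store safe) is not made. */
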
182cf5a9 1811/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
e6d0e152 1812 through it was seen, which would constitute a no-trap region for
1813 same accesses. */
1814struct name_to_bb
1815{
963aee26 1816 unsigned int ssa_name_ver;
42540642 1817 unsigned int phase;
963aee26 1818 bool store;
1819 HOST_WIDE_INT offset, size;
e6d0e152 1820 basic_block bb;
1821};
1822
d9dd21a8 1823/* Hashtable helpers. */
1824
298e7f9a 1825struct ssa_names_hasher : free_ptr_hash <name_to_bb>
d9dd21a8 1826{
9969c043 1827 static inline hashval_t hash (const name_to_bb *);
1828 static inline bool equal (const name_to_bb *, const name_to_bb *);
d9dd21a8 1829};
e6d0e152 1830
42540642 1831/* Used for quick clearing of the hash-table when we see calls.
1832 Hash entries with phase < nt_call_phase are invalid. */
1833static unsigned int nt_call_phase;
1834
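/* For example, a call that may free memory (or otherwise act as a
   barrier) ends the current no-trap region: accesses recorded before
   such a call must not be used to prove later accesses safe, so
   bumping nt_call_phase lazily invalidates all existing hash entries
   at once. */
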
963aee26 1835/* The hash function. */
d9dd21a8 1836
1837inline hashval_t
9969c043 1838ssa_names_hasher::hash (const name_to_bb *n)
e6d0e152 1839{
963aee26 1840 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1841 ^ (n->offset << 6) ^ (n->size << 3);
e6d0e152 1842}
1843
963aee26 1844/* The equality function of *P1 and *P2. */
e6d0e152 1845
d9dd21a8 1846inline bool
9969c043 1847ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
d9dd21a8 1848{
963aee26 1849 return n1->ssa_name_ver == n2->ssa_name_ver
1850 && n1->store == n2->store
1851 && n1->offset == n2->offset
1852 && n1->size == n2->size;
e6d0e152 1853}
1854
c1f445d2 1855class nontrapping_dom_walker : public dom_walker
1856{
1857public:
431205b7 1858 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
c1f445d2 1859 : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1860
96752458 1861 virtual edge before_dom_children (basic_block);
c1f445d2 1862 virtual void after_dom_children (basic_block);
1863
1864private:
1865
1866 /* We see the expression EXP in basic block BB. If it's an interesting
 1867 expression (a MEM_REF through an SSA_NAME) possibly insert the
1868 expression into the set NONTRAP or the hash table of seen expressions.
1869 STORE is true if this expression is on the LHS, otherwise it's on
1870 the RHS. */
1871 void add_or_mark_expr (basic_block, tree, bool);
1872
431205b7 1873 hash_set<tree> *m_nontrapping;
c1f445d2 1874
1875 /* The hash table for remembering what we've seen. */
1876 hash_table<ssa_names_hasher> m_seen_ssa_names;
1877};
1878
1879/* Called by walk_dominator_tree, when entering the block BB. */
96752458 1880edge
c1f445d2 1881nontrapping_dom_walker::before_dom_children (basic_block bb)
1882{
1883 edge e;
1884 edge_iterator ei;
1885 gimple_stmt_iterator gsi;
1886
1887 /* If we haven't seen all our predecessors, clear the hash-table. */
1888 FOR_EACH_EDGE (e, ei, bb->preds)
1889 if ((((size_t)e->src->aux) & 2) == 0)
1890 {
1891 nt_call_phase++;
1892 break;
1893 }
1894
1895 /* Mark this BB as being on the path to dominator root and as visited. */
1896 bb->aux = (void*)(1 | 2);
1897
1898 /* And walk the statements in order. */
1899 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1900 {
42acab1c 1901 gimple *stmt = gsi_stmt (gsi);
c1f445d2 1902
05a63389 1903 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
1904 || (is_gimple_call (stmt)
1905 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
c1f445d2 1906 nt_call_phase++;
1907 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1908 {
1909 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1910 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1911 }
1912 }
96752458 1913 return NULL;
c1f445d2 1914}
1915
1916/* Called by walk_dominator_tree, when basic block BB is exited. */
1917void
1918nontrapping_dom_walker::after_dom_children (basic_block bb)
1919{
1920 /* This BB isn't on the path to dominator root anymore. */
1921 bb->aux = (void*)2;
1922}
d9dd21a8 1923
f0b5f617 1924/* We see the expression EXP in basic block BB. If it's an interesting
182cf5a9 1925 expression (a MEM_REF through an SSA_NAME) possibly insert the
af4f74fa 1926 expression into the set NONTRAP or the hash table of seen expressions.
1927 STORE is true if this expression is on the LHS, otherwise it's on
1928 the RHS. */
c1f445d2 1929void
1930nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
e6d0e152 1931{
963aee26 1932 HOST_WIDE_INT size;
1933
182cf5a9 1934 if (TREE_CODE (exp) == MEM_REF
963aee26 1935 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
e913b5cd 1936 && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
963aee26 1937 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
e6d0e152 1938 {
1939 tree name = TREE_OPERAND (exp, 0);
1940 struct name_to_bb map;
d9dd21a8 1941 name_to_bb **slot;
af4f74fa 1942 struct name_to_bb *n2bb;
e6d0e152 1943 basic_block found_bb = 0;
1944
182cf5a9 1945 /* Try to find the last seen MEM_REF through the same
e6d0e152 1946 SSA_NAME, which can trap. */
963aee26 1947 map.ssa_name_ver = SSA_NAME_VERSION (name);
42540642 1948 map.phase = 0;
e6d0e152 1949 map.bb = 0;
af4f74fa 1950 map.store = store;
e913b5cd 1951 map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
963aee26 1952 map.size = size;
1953
c1f445d2 1954 slot = m_seen_ssa_names.find_slot (&map, INSERT);
d9dd21a8 1955 n2bb = *slot;
42540642 1956 if (n2bb && n2bb->phase >= nt_call_phase)
af4f74fa 1957 found_bb = n2bb->bb;
e6d0e152 1958
182cf5a9 1959 /* If we've found a trapping MEM_REF, _and_ it dominates EXP
e6d0e152 1960 (it's in a basic block on the path from us to the dominator root)
1961 then we can't trap. */
42540642 1962 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
e6d0e152 1963 {
431205b7 1964 m_nontrapping->add (exp);
e6d0e152 1965 }
1966 else
1967 {
1968 /* EXP might trap, so insert it into the hash table. */
af4f74fa 1969 if (n2bb)
e6d0e152 1970 {
42540642 1971 n2bb->phase = nt_call_phase;
af4f74fa 1972 n2bb->bb = bb;
e6d0e152 1973 }
1974 else
1975 {
af4f74fa 1976 n2bb = XNEW (struct name_to_bb);
963aee26 1977 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
42540642 1978 n2bb->phase = nt_call_phase;
af4f74fa 1979 n2bb->bb = bb;
1980 n2bb->store = store;
963aee26 1981 n2bb->offset = map.offset;
1982 n2bb->size = size;
af4f74fa 1983 *slot = n2bb;
e6d0e152 1984 }
1985 }
1986 }
1987}
1988
e6d0e152 1989/* This is the entry point of gathering non-trapping memory accesses.
1990 It will do a dominator walk over the whole function, and it will
1991 make use of the bb->aux pointers. It returns a set of trees
182cf5a9 1992 (the MEM_REFs themselves) which can't trap. */
431205b7 1993static hash_set<tree> *
e6d0e152 1994get_non_trapping (void)
1995{
42540642 1996 nt_call_phase = 0;
431205b7 1997 hash_set<tree> *nontrap = new hash_set<tree>;
e6d0e152 1998 /* We're going to do a dominator walk, so ensure that we have
1999 dominance information. */
2000 calculate_dominance_info (CDI_DOMINATORS);
2001
54c91640 2002 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2003 .walk (cfun->cfg->x_entry_block_ptr);
2004
42540642 2005 clear_aux_for_blocks ();
e6d0e152 2006 return nontrap;
2007}
2008
2009/* Do the main work of conditional store replacement. We already know
2010 that the recognized pattern looks like so:
2011
2012 split:
2013 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2014 MIDDLE_BB:
2015 something
2016 fallthrough (edge E0)
2017 JOIN_BB:
2018 some more
2019
2020 We check that MIDDLE_BB contains only one store, that that store
2021 doesn't trap (not via NOTRAP, but via checking if an access to the same
2022 memory location dominates us) and that the store has a "simple" RHS. */
2023
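/* In source terms this turns (a hypothetical example)

     if (cond)
       *p = v;

   into roughly

     if (!cond)
       tmp = *p;            <-- load inserted on the non-store edge
     *p = cond ? v : tmp;   <-- PHI in JOIN_BB feeding one store

   making the store unconditional; the dominating-access check is what
   proves the added load and the sunk store cannot trap. */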
2024static bool
2025cond_store_replacement (basic_block middle_bb, basic_block join_bb,
431205b7 2026 edge e0, edge e1, hash_set<tree> *nontrap)
e6d0e152 2027{
42acab1c 2028 gimple *assign = last_and_only_stmt (middle_bb);
03d37e4e 2029 tree lhs, rhs, name, name2;
1a91d914 2030 gphi *newphi;
2031 gassign *new_stmt;
75a70cf9 2032 gimple_stmt_iterator gsi;
efbcb6de 2033 source_location locus;
e6d0e152 2034
 2035 /* Check that middle_bb contains only one store. */
2036 if (!assign
6cc085b6 2037 || !gimple_assign_single_p (assign)
2038 || gimple_has_volatile_ops (assign))
e6d0e152 2039 return false;
2040
efbcb6de 2041 locus = gimple_location (assign);
75a70cf9 2042 lhs = gimple_assign_lhs (assign);
2043 rhs = gimple_assign_rhs1 (assign);
182cf5a9 2044 if (TREE_CODE (lhs) != MEM_REF
91cf53d5 2045 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
3211fa0a 2046 || !is_gimple_reg_type (TREE_TYPE (lhs)))
e6d0e152 2047 return false;
91cf53d5 2048
e6d0e152 2049 /* Prove that we can move the store down. We could also check
2050 TREE_THIS_NOTRAP here, but in that case we also could move stores,
2051 whose value is not available readily, which we want to avoid. */
431205b7 2052 if (!nontrap->contains (lhs))
e6d0e152 2053 return false;
2054
2055 /* Now we've checked the constraints, so do the transformation:
2056 1) Remove the single store. */
75a70cf9 2057 gsi = gsi_for_stmt (assign);
3211fa0a 2058 unlink_stmt_vdef (assign);
75a70cf9 2059 gsi_remove (&gsi, true);
91cf53d5 2060 release_defs (assign);
e6d0e152 2061
20496f0e 2062 /* Make both store and load use alias-set zero as we have to
2063 deal with the case of the store being a conditional change
2064 of the dynamic type. */
2065 lhs = unshare_expr (lhs);
2066 tree *basep = &lhs;
2067 while (handled_component_p (*basep))
2068 basep = &TREE_OPERAND (*basep, 0);
2069 if (TREE_CODE (*basep) == MEM_REF
2070 || TREE_CODE (*basep) == TARGET_MEM_REF)
2071 TREE_OPERAND (*basep, 1)
2072 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
2073 else
2074 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
2075 build_fold_addr_expr (*basep),
2076 build_zero_cst (ptr_type_node));
2077
03d37e4e 2078 /* 2) Insert a load of the stored memory location into a temporary
e6d0e152 2079 on the edge which did not contain the store. */
03d37e4e 2080 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2081 new_stmt = gimple_build_assign (name, lhs);
efbcb6de 2082 gimple_set_location (new_stmt, locus);
75a70cf9 2083 gsi_insert_on_edge (e1, new_stmt);
e6d0e152 2084
03d37e4e 2085 /* 3) Create a PHI node at the join block, with one argument
e6d0e152 2086 holding the old RHS, and the other holding the temporary
2087 where we stored the old memory contents. */
03d37e4e 2088 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2089 newphi = create_phi_node (name2, join_bb);
60d535d2 2090 add_phi_arg (newphi, rhs, e0, locus);
2091 add_phi_arg (newphi, name, e1, locus);
e6d0e152 2092
2093 lhs = unshare_expr (lhs);
75a70cf9 2094 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
e6d0e152 2095
03d37e4e 2096 /* 4) Insert the store of the PHI result in JOIN_BB. */
75a70cf9 2097 gsi = gsi_after_labels (join_bb);
2098 if (gsi_end_p (gsi))
e6d0e152 2099 {
75a70cf9 2100 gsi = gsi_last_bb (join_bb);
2101 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
e6d0e152 2102 }
2103 else
75a70cf9 2104 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
e6d0e152 2105
2106 return true;
2107}
4ee9c684 2108
ec611e12 2109/* Do the main work of conditional store replacement: sink the stores
 THEN_ASSIGN and ELSE_ASSIGN to JOIN_BB as one store fed by a PHI. */
91cf53d5 2110
2111static bool
ec611e12 2112cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
42acab1c 2113 basic_block join_bb, gimple *then_assign,
2114 gimple *else_assign)
91cf53d5 2115{
03d37e4e 2116 tree lhs_base, lhs, then_rhs, else_rhs, name;
91cf53d5 2117 source_location then_locus, else_locus;
2118 gimple_stmt_iterator gsi;
1a91d914 2119 gphi *newphi;
2120 gassign *new_stmt;
91cf53d5 2121
91cf53d5 2122 if (then_assign == NULL
2123 || !gimple_assign_single_p (then_assign)
3c25489e 2124 || gimple_clobber_p (then_assign)
6cc085b6 2125 || gimple_has_volatile_ops (then_assign)
91cf53d5 2126 || else_assign == NULL
3c25489e 2127 || !gimple_assign_single_p (else_assign)
6cc085b6 2128 || gimple_clobber_p (else_assign)
2129 || gimple_has_volatile_ops (else_assign))
91cf53d5 2130 return false;
2131
2132 lhs = gimple_assign_lhs (then_assign);
2133 if (!is_gimple_reg_type (TREE_TYPE (lhs))
2134 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
2135 return false;
2136
2137 lhs_base = get_base_address (lhs);
2138 if (lhs_base == NULL_TREE
2139 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
2140 return false;
2141
2142 then_rhs = gimple_assign_rhs1 (then_assign);
2143 else_rhs = gimple_assign_rhs1 (else_assign);
2144 then_locus = gimple_location (then_assign);
2145 else_locus = gimple_location (else_assign);
2146
2147 /* Now we've checked the constraints, so do the transformation:
2148 1) Remove the stores. */
2149 gsi = gsi_for_stmt (then_assign);
2150 unlink_stmt_vdef (then_assign);
2151 gsi_remove (&gsi, true);
2152 release_defs (then_assign);
2153
2154 gsi = gsi_for_stmt (else_assign);
2155 unlink_stmt_vdef (else_assign);
2156 gsi_remove (&gsi, true);
2157 release_defs (else_assign);
2158
03d37e4e 2159 /* 2) Create a PHI node at the join block, with one argument
 2160 holding the RHS of the then-store and the other holding
 2161 the RHS of the else-store. */
03d37e4e 2162 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2163 newphi = create_phi_node (name, join_bb);
60d535d2 2164 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
2165 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
91cf53d5 2166
2167 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2168
03d37e4e 2169 /* 3) Insert the store of the PHI result in JOIN_BB. */
91cf53d5 2170 gsi = gsi_after_labels (join_bb);
2171 if (gsi_end_p (gsi))
2172 {
2173 gsi = gsi_last_bb (join_bb);
2174 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2175 }
2176 else
2177 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2178
2179 return true;
2180}
2181
33ac517c 2182/* Return the single store in BB defining virtual operand VDEF, or NULL
 2183 if there are other stores in the BB or loads following the store. */
2184
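/* E.g. (hypothetical), for a BB ending in

     t_1 = *q_2;
     *p_3 = t_1;

   the store through p_3 qualifies: the preceding load carries no VDEF,
   so it neither counts as a second store nor appears as a use of the
   store's VDEF within the BB. */
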
2185static gimple *
2186single_trailing_store_in_bb (basic_block bb, tree vdef)
2187{
2188 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
2189 return NULL;
2190 gimple *store = SSA_NAME_DEF_STMT (vdef);
2191 if (gimple_bb (store) != bb
2192 || gimple_code (store) == GIMPLE_PHI)
2193 return NULL;
2194
2195 /* Verify there is no other store in this BB. */
2196 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
2197 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
2198 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
2199 return NULL;
2200
2201 /* Verify there is no load or store after the store. */
2202 use_operand_p use_p;
2203 imm_use_iterator imm_iter;
2204 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
2205 if (USE_STMT (use_p) != store
2206 && gimple_bb (USE_STMT (use_p)) == bb)
2207 return NULL;
2208
2209 return store;
2210}
2211
ec611e12 2212/* Conditional store replacement. We already know
2213 that the recognized pattern looks like so:
2214
2215 split:
2216 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2217 THEN_BB:
2218 ...
2219 X = Y;
2220 ...
2221 goto JOIN_BB;
2222 ELSE_BB:
2223 ...
2224 X = Z;
2225 ...
2226 fallthrough (edge E0)
2227 JOIN_BB:
2228 some more
2229
2230 We check that it is safe to sink the store to JOIN_BB by verifying that
2231 there are no read-after-write or write-after-write dependencies in
2232 THEN_BB and ELSE_BB. */
2233
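/* For example (hypothetical), in

     if (cond)
       *p = a;
     else
       *p = b;

   both branches store to the same location, so the pair can be sunk
   to JOIN_BB as

     *p = cond ? a : b;   <-- one store fed by a PHI of the two RHSs

   provided the dependence checks below show no conflicting access in
   either branch. */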
2234static bool
2235cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
2236 basic_block join_bb)
2237{
f1f41a6c 2238 vec<data_reference_p> then_datarefs, else_datarefs;
2239 vec<ddr_p> then_ddrs, else_ddrs;
42acab1c 2240 gimple *then_store, *else_store;
ec611e12 2241 bool found, ok = false, res;
2242 struct data_dependence_relation *ddr;
2243 data_reference_p then_dr, else_dr;
2244 int i, j;
2245 tree then_lhs, else_lhs;
ec611e12 2246 basic_block blocks[3];
2247
33ac517c 2248 /* Handle the case of a single store in THEN_BB and ELSE_BB. That is
2249 cheap enough to always handle as it allows us to elide dependence
2250 checking. */
2251 gphi *vphi = NULL;
2252 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
2253 gsi_next (&si))
2254 if (virtual_operand_p (gimple_phi_result (si.phi ())))
2255 {
2256 vphi = si.phi ();
2257 break;
2258 }
2259 if (!vphi)
ec611e12 2260 return false;
33ac517c 2261 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
2262 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
2263 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
2264 if (then_assign)
2265 {
2266 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
2267 if (else_assign)
2268 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2269 then_assign, else_assign);
2270 }
ec611e12 2271
33ac517c 2272 if (MAX_STORES_TO_SINK == 0)
2273 return false;
ec611e12 2274
2275 /* Find data references. */
f1f41a6c 2276 then_datarefs.create (1);
2277 else_datarefs.create (1);
ec611e12 2278 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
2279 == chrec_dont_know)
f1f41a6c 2280 || !then_datarefs.length ()
ec611e12 2281 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
c71d3c24 2282 == chrec_dont_know)
f1f41a6c 2283 || !else_datarefs.length ())
ec611e12 2284 {
2285 free_data_refs (then_datarefs);
2286 free_data_refs (else_datarefs);
2287 return false;
2288 }
2289
2290 /* Find pairs of stores with equal LHS. */
42acab1c 2291 auto_vec<gimple *, 1> then_stores, else_stores;
f1f41a6c 2292 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
ec611e12 2293 {
2294 if (DR_IS_READ (then_dr))
2295 continue;
2296
2297 then_store = DR_STMT (then_dr);
728dcc71 2298 then_lhs = gimple_get_lhs (then_store);
c71d3c24 2299 if (then_lhs == NULL_TREE)
2300 continue;
ec611e12 2301 found = false;
2302
f1f41a6c 2303 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
ec611e12 2304 {
2305 if (DR_IS_READ (else_dr))
2306 continue;
2307
2308 else_store = DR_STMT (else_dr);
728dcc71 2309 else_lhs = gimple_get_lhs (else_store);
c71d3c24 2310 if (else_lhs == NULL_TREE)
2311 continue;
ec611e12 2312
2313 if (operand_equal_p (then_lhs, else_lhs, 0))
2314 {
2315 found = true;
2316 break;
2317 }
2318 }
2319
2320 if (!found)
2321 continue;
2322
f1f41a6c 2323 then_stores.safe_push (then_store);
2324 else_stores.safe_push (else_store);
ec611e12 2325 }
2326
 2327 /* No pairs of stores found, or more than we are willing to sink. */
f1f41a6c 2328 if (!then_stores.length ()
2329 || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
ec611e12 2330 {
2331 free_data_refs (then_datarefs);
2332 free_data_refs (else_datarefs);
ec611e12 2333 return false;
2334 }
2335
2336 /* Compute and check data dependencies in both basic blocks. */
f1f41a6c 2337 then_ddrs.create (1);
2338 else_ddrs.create (1);
2339 if (!compute_all_dependences (then_datarefs, &then_ddrs,
1e094109 2340 vNULL, false)
f1f41a6c 2341 || !compute_all_dependences (else_datarefs, &else_ddrs,
1e094109 2342 vNULL, false))
8b3fb720 2343 {
2344 free_dependence_relations (then_ddrs);
2345 free_dependence_relations (else_ddrs);
2346 free_data_refs (then_datarefs);
2347 free_data_refs (else_datarefs);
8b3fb720 2348 return false;
2349 }
ec611e12 2350 blocks[0] = then_bb;
2351 blocks[1] = else_bb;
2352 blocks[2] = join_bb;
2353 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2354
2355 /* Check that there are no read-after-write or write-after-write dependencies
2356 in THEN_BB. */
f1f41a6c 2357 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
ec611e12 2358 {
2359 struct data_reference *dra = DDR_A (ddr);
2360 struct data_reference *drb = DDR_B (ddr);
2361
2362 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2363 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2364 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2365 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2366 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2367 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2368 {
2369 free_dependence_relations (then_ddrs);
2370 free_dependence_relations (else_ddrs);
2473bfb7 2371 free_data_refs (then_datarefs);
2372 free_data_refs (else_datarefs);
ec611e12 2373 return false;
2374 }
2375 }
2376
2377 /* Check that there are no read-after-write or write-after-write dependencies
2378 in ELSE_BB. */
f1f41a6c 2379 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
ec611e12 2380 {
2381 struct data_reference *dra = DDR_A (ddr);
2382 struct data_reference *drb = DDR_B (ddr);
2383
2384 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2385 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2386 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2387 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2388 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2389 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2390 {
2391 free_dependence_relations (then_ddrs);
2392 free_dependence_relations (else_ddrs);
2473bfb7 2393 free_data_refs (then_datarefs);
2394 free_data_refs (else_datarefs);
ec611e12 2395 return false;
2396 }
2397 }
2398
2399 /* Sink stores with same LHS. */
f1f41a6c 2400 FOR_EACH_VEC_ELT (then_stores, i, then_store)
ec611e12 2401 {
f1f41a6c 2402 else_store = else_stores[i];
ec611e12 2403 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2404 then_store, else_store);
2405 ok = ok || res;
2406 }
2407
2408 free_dependence_relations (then_ddrs);
2409 free_dependence_relations (else_ddrs);
2473bfb7 2410 free_data_refs (then_datarefs);
2411 free_data_refs (else_datarefs);
ec611e12 2412
2413 return ok;
2414}
2415
239e9670 2416/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2417
2418static bool
42acab1c 2419local_mem_dependence (gimple *stmt, basic_block bb)
239e9670 2420{
2421 tree vuse = gimple_vuse (stmt);
42acab1c 2422 gimple *def;
239e9670 2423
2424 if (!vuse)
2425 return false;
2426
2427 def = SSA_NAME_DEF_STMT (vuse);
2428 return (def && gimple_bb (def) == bb);
2429}
2430
2431/* Given a "diamond" control-flow pattern where BB0 tests a condition,
2432 BB1 and BB2 are "then" and "else" blocks dependent on this test,
f32420fb 2433 and BB3 rejoins control flow following BB1 and BB2, look for
239e9670 2434 opportunities to hoist loads as follows. If BB3 contains a PHI of
2435 two loads, one each occurring in BB1 and BB2, and the loads are
2436 provably of adjacent fields in the same structure, then move both
2437 loads into BB0. Of course this can only be done if there are no
2438 dependencies preventing such motion.
2439
2440 One of the hoisted loads will always be speculative, so the
2441 transformation is currently conservative:
2442
2443 - The fields must be strictly adjacent.
2444 - The two fields must occupy a single memory block that is
2445 guaranteed to not cross a page boundary.
2446
2447 The last is difficult to prove, as such memory blocks should be
2448 aligned on the minimum of the stack alignment boundary and the
2449 alignment guaranteed by heap allocation interfaces. Thus we rely
2450 on a parameter for the alignment value.
2451
2452 Provided a good value is used for the last case, the first
2453 restriction could possibly be relaxed. */
2454
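/* As a worked instance of the profitability test below (hypothetical
   numbers): with a 64-byte cache line (512 bits), two adjacent 8-byte
   pointer fields at bit offsets 64 and 128, and the first field's
   alignment contributing align1 = 64, we get
   align1 + offset2 - offset1 + size2 = 64 + 128 - 64 + 64 = 192 bits,
   well within the line, so the pair qualifies. */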
2455static void
2456hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2457 basic_block bb2, basic_block bb3)
2458{
2459 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2460 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
1a91d914 2461 gphi_iterator gsi;
239e9670 2462
2463 /* Walk the phis in bb3 looking for an opportunity. We are looking
2464 for phis of two SSA names, one each of which is defined in bb1 and
2465 bb2. */
2466 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2467 {
1a91d914 2468 gphi *phi_stmt = gsi.phi ();
42acab1c 2469 gimple *def1, *def2;
dfcf26a5 2470 tree arg1, arg2, ref1, ref2, field1, field2;
239e9670 2471 tree tree_offset1, tree_offset2, tree_size2, next;
2472 int offset1, offset2, size2;
2473 unsigned align1;
2474 gimple_stmt_iterator gsi2;
2475 basic_block bb_for_def1, bb_for_def2;
2476
7c782c9b 2477 if (gimple_phi_num_args (phi_stmt) != 2
2478 || virtual_operand_p (gimple_phi_result (phi_stmt)))
239e9670 2479 continue;
2480
2481 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2482 arg2 = gimple_phi_arg_def (phi_stmt, 1);
f32420fb 2483
239e9670 2484 if (TREE_CODE (arg1) != SSA_NAME
2485 || TREE_CODE (arg2) != SSA_NAME
2486 || SSA_NAME_IS_DEFAULT_DEF (arg1)
7c782c9b 2487 || SSA_NAME_IS_DEFAULT_DEF (arg2))
239e9670 2488 continue;
2489
2490 def1 = SSA_NAME_DEF_STMT (arg1);
2491 def2 = SSA_NAME_DEF_STMT (arg2);
2492
2493 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2494 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2495 continue;
2496
2497 /* Check the mode of the arguments to be sure a conditional move
2498 can be generated for it. */
935611bc 2499 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2500 == CODE_FOR_nothing)
239e9670 2501 continue;
2502
2503 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2504 if (!gimple_assign_single_p (def1)
6cc085b6 2505 || !gimple_assign_single_p (def2)
2506 || gimple_has_volatile_ops (def1)
2507 || gimple_has_volatile_ops (def2))
239e9670 2508 continue;
2509
2510 ref1 = gimple_assign_rhs1 (def1);
2511 ref2 = gimple_assign_rhs1 (def2);
2512
2513 if (TREE_CODE (ref1) != COMPONENT_REF
2514 || TREE_CODE (ref2) != COMPONENT_REF)
2515 continue;
2516
2517 /* The zeroth operand of the two component references must be
2518 identical. It is not sufficient to compare get_base_address of
2519 the two references, because this could allow for different
2520 elements of the same array in the two trees. It is not safe to
2521 assume that the existence of one array element implies the
2522 existence of a different one. */
2523 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2524 continue;
2525
2526 field1 = TREE_OPERAND (ref1, 1);
2527 field2 = TREE_OPERAND (ref2, 1);
2528
2529 /* Check for field adjacency, and ensure field1 comes first. */
2530 for (next = DECL_CHAIN (field1);
2531 next && TREE_CODE (next) != FIELD_DECL;
2532 next = DECL_CHAIN (next))
2533 ;
2534
2535 if (next != field2)
2536 {
2537 for (next = DECL_CHAIN (field2);
2538 next && TREE_CODE (next) != FIELD_DECL;
2539 next = DECL_CHAIN (next))
2540 ;
2541
2542 if (next != field1)
2543 continue;
2544
dfcf26a5 2545 std::swap (field1, field2);
2546 std::swap (def1, def2);
239e9670 2547 }
2548
7c74ee50 2549 bb_for_def1 = gimple_bb (def1);
2550 bb_for_def2 = gimple_bb (def2);
2551
239e9670 2552 /* Check for proper alignment of the first field. */
2553 tree_offset1 = bit_position (field1);
2554 tree_offset2 = bit_position (field2);
2555 tree_size2 = DECL_SIZE (field2);
2556
e913b5cd 2557 if (!tree_fits_uhwi_p (tree_offset1)
2558 || !tree_fits_uhwi_p (tree_offset2)
2559 || !tree_fits_uhwi_p (tree_size2))
239e9670 2560 continue;
2561
e913b5cd 2562 offset1 = tree_to_uhwi (tree_offset1);
2563 offset2 = tree_to_uhwi (tree_offset2);
2564 size2 = tree_to_uhwi (tree_size2);
239e9670 2565 align1 = DECL_ALIGN (field1) % param_align_bits;
2566
2567 if (offset1 % BITS_PER_UNIT != 0)
2568 continue;
2569
2570 /* For profitability, the two field references should fit within
2571 a single cache line. */
2572 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2573 continue;
2574
2575 /* The two expressions cannot be dependent upon vdefs defined
2576 in bb1/bb2. */
2577 if (local_mem_dependence (def1, bb_for_def1)
2578 || local_mem_dependence (def2, bb_for_def2))
2579 continue;
2580
2581 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2582 bb0. We hoist the first one first so that a cache miss is handled
2583 efficiently regardless of hardware cache-fill policy. */
2584 gsi2 = gsi_for_stmt (def1);
2585 gsi_move_to_bb_end (&gsi2, bb0);
2586 gsi2 = gsi_for_stmt (def2);
2587 gsi_move_to_bb_end (&gsi2, bb0);
2588
2589 if (dump_file && (dump_flags & TDF_DETAILS))
2590 {
2591 fprintf (dump_file,
2592 "\nHoisting adjacent loads from %d and %d into %d: \n",
2593 bb_for_def1->index, bb_for_def2->index, bb0->index);
2594 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2595 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2596 }
2597 }
2598}
2599
2600/* Determine whether we should attempt to hoist adjacent loads out of
2601 diamond patterns in pass_phiopt. Always hoist loads if
2602 -fhoist-adjacent-loads is specified and the target machine has
6f0ddab1 2603 both a conditional move instruction and a defined cache line size. */
239e9670 2604
2605static bool
2606gate_hoist_loads (void)
2607{
6f0ddab1 2608 return (flag_hoist_adjacent_loads == 1
2609 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2610 && HAVE_conditional_move);
239e9670 2611}
2612
65b0537f 2613/* This pass tries to replace an if-then-else block with an
2614 assignment. We have four kinds of transformations. Some of these
2615 transformations are also performed by the ifcvt RTL optimizer.
2616
2617 Conditional Replacement
2618 -----------------------
2619
2620 This transformation, implemented in conditional_replacement,
2621 replaces
2622
2623 bb0:
2624 if (cond) goto bb2; else goto bb1;
2625 bb1:
2626 bb2:
2627 x = PHI <0 (bb1), 1 (bb0), ...>;
2628
2629 with
2630
2631 bb0:
2632 x' = cond;
2633 goto bb2;
2634 bb2:
2635 x = PHI <x' (bb0), ...>;
2636
2637 We remove bb1 as it becomes unreachable. This occurs often due to
2638 gimplification of conditionals.
2639
2640 Value Replacement
2641 -----------------
2642
2643 This transformation, implemented in value_replacement, replaces
2644
2645 bb0:
2646 if (a != b) goto bb2; else goto bb1;
2647 bb1:
2648 bb2:
2649 x = PHI <a (bb1), b (bb0), ...>;
2650
2651 with
2652
2653 bb0:
2654 bb2:
2655 x = PHI <b (bb0), ...>;
2656
2657 This opportunity can sometimes occur as a result of other
2658 optimizations.
2659
2660
2661 Another case caught by value replacement looks like this:
2662
2663 bb0:
2664 t1 = a == CONST;
2665 t2 = b > c;
2666 t3 = t1 & t2;
2667 if (t3 != 0) goto bb1; else goto bb2;
2668 bb1:
2669 bb2:
 2670 x = PHI <CONST, a>
2671
2672 Gets replaced with:
2673 bb0:
2674 bb2:
2675 t1 = a == CONST;
2676 t2 = b > c;
2677 t3 = t1 & t2;
2678 x = a;
2679
2680 ABS Replacement
2681 ---------------
2682
2683 This transformation, implemented in abs_replacement, replaces
2684
2685 bb0:
2686 if (a >= 0) goto bb2; else goto bb1;
2687 bb1:
2688 x = -a;
2689 bb2:
2690 x = PHI <x (bb1), a (bb0), ...>;
2691
2692 with
2693
2694 bb0:
2695 x' = ABS_EXPR< a >;
2696 bb2:
2697 x = PHI <x' (bb0), ...>;
2698
2699 MIN/MAX Replacement
2700 -------------------
2701
 2702 This transformation, implemented in minmax_replacement, replaces
2703
2704 bb0:
2705 if (a <= b) goto bb2; else goto bb1;
2706 bb1:
2707 bb2:
2708 x = PHI <b (bb1), a (bb0), ...>;
2709
2710 with
2711
2712 bb0:
2713 x' = MIN_EXPR (a, b)
2714 bb2:
2715 x = PHI <x' (bb0), ...>;
2716
2717 A similar transformation is done for MAX_EXPR.
2718
2719
2720 This pass also performs a fifth transformation of a slightly different
2721 flavor.
2722
29a78fec 2723 Factor conversion in COND_EXPR
2724 ------------------------------
2725
2726 This transformation factors the conversion out of COND_EXPR with
2727 factor_out_conditional_conversion.
2728
2729 For example:
2730 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2731 <bb 3>:
2732 tmp = (int) a;
2733 <bb 4>:
2734 tmp = PHI <tmp, CST>
2735
2736 Into:
2737 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2738 <bb 3>:
2739 <bb 4>:
2740 a = PHI <a, CST>
2741 tmp = (int) a;
2742
65b0537f 2743 Adjacent Load Hoisting
2744 ----------------------
2745
2746 This transformation replaces
2747
2748 bb0:
2749 if (...) goto bb2; else goto bb1;
2750 bb1:
2751 x1 = (<expr>).field1;
2752 goto bb3;
2753 bb2:
2754 x2 = (<expr>).field2;
2755 bb3:
2756 # x = PHI <x1, x2>;
2757
2758 with
2759
2760 bb0:
2761 x1 = (<expr>).field1;
2762 x2 = (<expr>).field2;
2763 if (...) goto bb2; else goto bb1;
2764 bb1:
2765 goto bb3;
2766 bb2:
2767 bb3:
2768 # x = PHI <x1, x2>;
2769
2770 The purpose of this transformation is to enable generation of conditional
2771 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
2772 the loads is speculative, the transformation is restricted to very
2773 specific cases to avoid introducing a page fault. We are looking for
2774 the common idiom:
2775
2776 if (...)
2777 x = y->left;
2778 else
2779 x = y->right;
2780
2781 where left and right are typically adjacent pointers in a tree structure. */
20e5647c 2782
cbe8bda8 2783namespace {
2784
2785const pass_data pass_data_phiopt =
4ee9c684 2786{
cbe8bda8 2787 GIMPLE_PASS, /* type */
2788 "phiopt", /* name */
2789 OPTGROUP_NONE, /* optinfo_flags */
cbe8bda8 2790 TV_TREE_PHIOPT, /* tv_id */
2791 ( PROP_cfg | PROP_ssa ), /* properties_required */
2792 0, /* properties_provided */
2793 0, /* properties_destroyed */
2794 0, /* todo_flags_start */
8b88439e 2795 0, /* todo_flags_finish */
4ee9c684 2796};
e6d0e152 2797
cbe8bda8 2798class pass_phiopt : public gimple_opt_pass
2799{
2800public:
9af5ce0c 2801 pass_phiopt (gcc::context *ctxt)
a2c2432d 2802 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
cbe8bda8 2803 {}
2804
2805 /* opt_pass methods: */
ae84f584 2806 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
a2c2432d 2807 void set_pass_param (unsigned n, bool param)
2808 {
2809 gcc_assert (n == 0);
2810 early_p = param;
2811 }
3dac50cc 2812 virtual bool gate (function *) { return flag_ssa_phiopt; }
65b0537f 2813 virtual unsigned int execute (function *)
2814 {
a2c2432d 2815 return tree_ssa_phiopt_worker (false,
2816 !early_p ? gate_hoist_loads () : false,
2817 early_p);
65b0537f 2818 }
cbe8bda8 2819
a2c2432d 2820private:
2821 bool early_p;
cbe8bda8 2822}; // class pass_phiopt
2823
2824} // anon namespace
2825
2826gimple_opt_pass *
2827make_pass_phiopt (gcc::context *ctxt)
2828{
2829 return new pass_phiopt (ctxt);
2830}
2831
cbe8bda8 2832namespace {
2833
2834const pass_data pass_data_cselim =
e6d0e152 2835{
cbe8bda8 2836 GIMPLE_PASS, /* type */
2837 "cselim", /* name */
2838 OPTGROUP_NONE, /* optinfo_flags */
cbe8bda8 2839 TV_TREE_PHIOPT, /* tv_id */
2840 ( PROP_cfg | PROP_ssa ), /* properties_required */
2841 0, /* properties_provided */
2842 0, /* properties_destroyed */
2843 0, /* todo_flags_start */
8b88439e 2844 0, /* todo_flags_finish */
e6d0e152 2845};
cbe8bda8 2846
2847class pass_cselim : public gimple_opt_pass
2848{
2849public:
9af5ce0c 2850 pass_cselim (gcc::context *ctxt)
2851 : gimple_opt_pass (pass_data_cselim, ctxt)
cbe8bda8 2852 {}
2853
2854 /* opt_pass methods: */
31315c24 2855 virtual bool gate (function *) { return flag_tree_cselim; }
65b0537f 2856 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
cbe8bda8 2857
2858}; // class pass_cselim
2859
2860} // anon namespace
2861
2862gimple_opt_pass *
2863make_pass_cselim (gcc::context *ctxt)
2864{
2865 return new pass_cselim (ctxt);
2866}