/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "tree-eh.h"
#include "gimple-fold.h"
#include "internal-fn.h"

static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
                                   tree, tree);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
                                                gimple *);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple *, tree, tree);
static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
                                                      edge, edge, gimple *,
                                                      tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */

static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}

/* If SEQ contains just one PHI, return it.  Otherwise return the single
   PHI in SEQ whose arguments for edges E0 and E1 differ, or NULL if there
   is not exactly one such PHI.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose two edge arguments differ, then
         there is more than one such PHI; it is not a singleton, so
         return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
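/* As a sketch for orientation (block numbers are arbitrary), the worker
   matches two CFG shapes: a triangle, where the middle block bb1 falls
   through to the join block bb2,

       bb0
      /   \
    bb1    |
      \    |
       bb2

   and, for store elimination and load hoisting only, a diamond, where
   both arms meet in a separate join block:

       bb0
      /   \
    bb1   bb2
      \   /
       bb3  */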
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for a COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* If bb1 has no successors, or bb2 is missing or has no successors,
         this is not a candidate.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          std::swap (bb1, bb2);
          std::swap (e1, e2);
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 only has one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          if (!early_p)
            for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
              {
                phi = as_a <gphi *> (gsi_stmt (gsi));
                arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
                arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
                if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                  {
                    candorest = false;
                    cfgchanged = true;
                    break;
                  }
              }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

          gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
                                                            arg0, arg1,
                                                            cond_stmt);
          if (newphi != NULL)
            {
              phi = newphi;
              /* factor_out_conditional_conversion may create a new PHI in
                 BB2 and eliminate an existing PHI in BB2.  Recompute values
                 that may be affected by that change.  */
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
            }

          /* Do the replacement of conditional if it can be done.  */
          if (!early_p && two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (!early_p
                   && conditional_replacement (bb, bb1, e1, e2, phi,
                                               arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (!early_p
                   && cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
                                                                e2, phi, arg0,
                                                                arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}

/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are CONVERT_STMT, factor out the conversion and perform the conversion
   to the result of PHI stmt.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */
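
/* For instance (a sketch; the SSA names are illustrative), with int i
   and long l,

     bb1:
       i_2 = (int) l_1;
     bb2:
       r_3 = PHI <i_2 (bb1), 6 (bb0)>

   is rewritten so the conversion happens once, after the merge:

     bb2:
       t_4 = PHI <l_1 (bb1), 6 (bb0)>
       r_3 = (int) t_4;  */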

static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
                                   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statements have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
          && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
        return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
         is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
          || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
        return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
        new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
          && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
        {
          if (gimple_assign_cast_p (arg0_def_stmt))
            {
              /* For the INTEGER_CST case, we are just moving the
                 conversion from one place to another, which can often
                 hurt as the conversion moves further away from the
                 statement that computes the value.  So, perform this
                 only if new_arg0 is an operand of COND_STMT, or
                 if arg0_def_stmt is the only non-debug stmt in
                 its basic block, because then it is possible this
                 could enable further optimizations (minmax replacement
                 etc.).  See PR71016.  */
              if (new_arg0 != gimple_cond_lhs (cond_stmt)
                  && new_arg0 != gimple_cond_rhs (cond_stmt)
                  && gimple_bb (arg0_def_stmt) == e0->src)
                {
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_prev_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    {
                      if (gassign *assign
                            = dyn_cast <gassign *> (gsi_stmt (gsi)))
                        {
                          tree lhs = gimple_assign_lhs (assign);
                          enum tree_code ass_code
                            = gimple_assign_rhs_code (assign);
                          if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
                            return NULL;
                          if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
                            return NULL;
                          gsi_prev_nondebug (&gsi);
                          if (!gsi_end_p (gsi))
                            return NULL;
                        }
                      else
                        return NULL;
                    }
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_next_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    return NULL;
                }
              new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
            }
          else
            return NULL;
        }
      else
        return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If the types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
               " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return newphi;
}

/* Optimize
   # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
   if (x_5 op cstN) # where op is == or != and N is 1 or 2
     goto bb3;
   else
     goto bb4;
   bb3:
   bb4:
   # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1

   to r_6 = x_5 + (min (cst3, cst4) - cst1) or
      r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
      of cst3 and cst4 is smaller.  */
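
/* For instance (a sketch with illustrative SSA names), if x_5 has the
   range [0, 1] and the condition is x_5 == 0, then

     # r_6 = PHI <2(2), 3(3)>   # 2 on the false path, 3 on the true path

   becomes r_6 = 3 - x_5, which yields 3 for x_5 == 0 and 2 for
   x_5 == 1.  */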

static bool
two_value_replacement (basic_block cond_bb, basic_block middle_bb,
                       edge e1, gphi *phi, tree arg0, tree arg1)
{
  /* Only look for adjacent integer constants.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
      || TREE_CODE (arg0) != INTEGER_CST
      || TREE_CODE (arg1) != INTEGER_CST
      || (tree_int_cst_lt (arg0, arg1)
          ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
          : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  gimple *stmt = last_stmt (cond_bb);
  tree lhs = gimple_cond_lhs (stmt);
  tree rhs = gimple_cond_rhs (stmt);

  if (TREE_CODE (lhs) != SSA_NAME
      || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
      || TREE_CODE (rhs) != INTEGER_CST)
    return false;

  switch (gimple_cond_code (stmt))
    {
    case EQ_EXPR:
    case NE_EXPR:
      break;
    default:
      return false;
    }

  /* Defer boolean x ? 0 : {1,-1} or x ? {1,-1} : 0 to
     conditional_replacement.  */
  if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
      && (integer_zerop (arg0)
          || integer_zerop (arg1)
          || TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
          || (TYPE_PRECISION (TREE_TYPE (arg0))
              <= TYPE_PRECISION (TREE_TYPE (lhs)))))
    return false;

  wide_int min, max;
  if (get_range_info (lhs, &min, &max) != VR_RANGE)
    {
      int prec = TYPE_PRECISION (TREE_TYPE (lhs));
      signop sgn = TYPE_SIGN (TREE_TYPE (lhs));
      min = wi::min_value (prec, sgn);
      max = wi::max_value (prec, sgn);
    }
  if (min + 1 != max
      || (wi::to_wide (rhs) != min
          && wi::to_wide (rhs) != max))
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  edge true_edge, false_edge;
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((gimple_cond_code (stmt) == EQ_EXPR)
      ^ (wi::to_wide (rhs) == max)
      ^ (e1 == false_edge))
    std::swap (arg0, arg1);

  tree type;
  if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
    {
      /* Avoid performing the arithmetic in a boolean type, which has
         different semantics; otherwise, of the two types with the same
         precision, prefer the unsigned one.  */
      if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
          || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
        type = TREE_TYPE (lhs);
      else
        type = TREE_TYPE (arg0);
    }
  else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
    type = TREE_TYPE (lhs);
  else
    type = TREE_TYPE (arg0);

  min = wide_int::from (min, TYPE_PRECISION (type),
                        TYPE_SIGN (TREE_TYPE (lhs)));
  wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
                               TYPE_SIGN (TREE_TYPE (arg0)));
  enum tree_code code;
  wi::overflow_type ovf;
  if (tree_int_cst_lt (arg0, arg1))
    {
      code = PLUS_EXPR;
      a -= min;
      if (!TYPE_UNSIGNED (type))
        {
          /* lhs is known to be in range [min, min+1] and we want to add a
             to it.  Check if that operation can overflow for those 2 values
             and if yes, force unsigned type.  */
          wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
          if (ovf)
            type = unsigned_type_for (type);
        }
    }
  else
    {
      code = MINUS_EXPR;
      a += min;
      if (!TYPE_UNSIGNED (type))
        {
          /* lhs is known to be in range [min, min+1] and we want to subtract
             it from a.  Check if that operation can overflow for those 2
             values and if yes, force unsigned type.  */
          wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
          if (ovf)
            type = unsigned_type_for (type);
        }
    }

  tree arg = wide_int_to_tree (type, a);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
    lhs = gimplify_build1 (&gsi, NOP_EXPR, type, lhs);
  tree new_rhs;
  if (code == PLUS_EXPR)
    new_rhs = gimplify_build2 (&gsi, PLUS_EXPR, type, lhs, arg);
  else
    new_rhs = gimplify_build2 (&gsi, MINUS_EXPR, type, arg, lhs);
  if (!useless_type_conversion_p (TREE_TYPE (arg0), type))
    new_rhs = gimplify_build1 (&gsi, NOP_EXPR, TREE_TYPE (arg0), new_rhs);

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);

  /* Note that we optimized this PHI.  */
  return true;
}

/* The function conditional_replacement does the main work of the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
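
/* For instance (a sketch with illustrative SSA names), given

     if (a_2 < b_3) goto bb1; else goto bb2;
     ...
     x_4 = PHI <1(bb1), 0(bb2)>

   the PHI becomes x_4 = a_2 < b_3.  With arguments 0 and -1 the converted
   condition is negated instead, and with 0 and (1 << cst) it is shifted
   left by cst.  */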

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gphi *phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg = false;
  int shift = 0;
  tree nonzero_arg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments have the constants 0 and 1, or 0 and -1, or
     0 and (1 << cst), then convert it to the conditional.  */
  if (integer_zerop (arg0))
    nonzero_arg = arg1;
  else if (integer_zerop (arg1))
    nonzero_arg = arg0;
  else
    return false;
  if (integer_all_onesp (nonzero_arg))
    neg = true;
  else if (integer_pow2p (nonzero_arg))
    {
      shift = tree_log2 (nonzero_arg);
      if (shift && POINTER_TYPE_P (TREE_TYPE (nonzero_arg)))
        return false;
    }
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1) or (0, (1 << shift)).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND' or dest = (COND) << shift

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }
  else if (shift)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build2_loc (gimple_location (stmt),
                              LSHIFT_EXPR, TREE_TYPE (cond), cond,
                              build_int_cst (integer_type_node, shift));
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      location_t locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && known_eq (mem_ref_offset (tem) + offset, 0))
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
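
/* For instance (a sketch with illustrative SSA names), given

     _1 = a_2 == b_3;
     _4 = _1 & c_5;
     if (_4 != 0) ...

   and PHI arguments a_2 and b_3, the BIT_AND_EXPR case below matches and
   *CODE is updated from NE_EXPR to EQ_EXPR.  */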

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}

/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
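
/* For instance, x + 0 and 0 + x both equal x, so zero is neutral for
   PLUS_EXPR on either side, while x - 0 equals x but 0 - x does not,
   so zero is neutral for MINUS_EXPR only when RIGHT is true.  */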

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}

/* Returns true if ARG is an absorbing element for operation CODE.  RIGHT
   says whether ARG is the right operand; RVAL is the other operand, which
   matters for the division and modulo cases.  */
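
/* For instance, x * 0 and 0 * x both equal 0, so zero absorbs MULT_EXPR
   on either side; 0 << x equals 0 only when the zero is the left operand;
   and 0 / x equals 0 only when x is known to be nonzero, which is why the
   division and modulo cases below also check RVAL.  */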

static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
              && integer_zerop (arg)
              && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise
   return 0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
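
/* For instance (a sketch with illustrative SSA names),

     if (x_2 == 0) goto bb1; else goto bb2;
     ...
     r_3 = PHI <0(bb1), x_2(bb2)>

   yields x_2 on both paths, so r_3 can be replaced by x_2 outright.  */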

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple *phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          if (gimple_code (stmt) != GIMPLE_PREDICT
              && gimple_code (stmt) != GIMPLE_NOP)
            empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or only defines the PHI
         arguments, and this is the single PHI whose args differ for the
         edges e0 and e1, then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi));
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }

    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
        goto <bb 3>;
      else
        goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
        goto <bb 4>;
      else
        goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
        break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
        break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
        return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
          || TREE_CODE (rhs1) != SSA_NAME
          || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
          || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
          || !single_imm_use (lhs, &use_p, &use_stmt)
          || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
        return 0;
      switch (gimple_assign_rhs_code (g))
        {
        CASE_CONVERT:
          break;
        case PLUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
          if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
            return 0;
          break;
        default:
          return 0;
        }
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
        return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
        {
          cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
                                      gimple_assign_rhs2 (g));
          if (TREE_OVERFLOW (cond_rhs))
            return 0;
        }
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
           && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
           && neutral_element_p (code_def, cond_rhs, true))
          || (arg1 == rhs2
              && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
              && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
                  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
                      && absorbing_element_p (code_def,
                                              cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
         def-stmt in:
         if (n_5 != 0)
           goto <bb 3>;
         else
           goto <bb 4>;

         <bb 3>:
         # RANGE [0, 4294967294]
         u_6 = n_5 + 4294967295;

         <bb 4>:
         # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
        {
          /* If available, we can use VR of phi result at least.  */
          tree phires = gimple_phi_result (phi);
          struct range_info_def *phires_range_info
            = SSA_NAME_RANGE_INFO (phires);
          if (phires_range_info)
            duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
                                           phires_range_info);
        }
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
        {
          tree plhs = gimple_assign_lhs (prep_stmt[i]);
          reset_flow_sensitive_info (plhs);
          gsi_from = gsi_for_stmt (prep_stmt[i]);
          gsi_move_before (&gsi_from, &gsi);
        }
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
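
/* For instance (a sketch with illustrative SSA names), with an empty
   middle block,

     if (a_2 < b_3) goto bb1; else goto bb2;
     ...
     r_4 = PHI <a_2(bb1), b_3(bb2)>

   becomes r_4 = MIN_EXPR <a_2, b_3>.  */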

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple *phi,
                    tree arg0, tree arg1)
{
  tree result;
  edge true_edge, false_edge;
  enum tree_code minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  tree type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
  enum tree_code cmp = gimple_cond_code (cond);
  tree rhs = gimple_cond_rhs (cond);

  /* Turn EQ/NE of extreme values to order comparisons.  */
  if ((cmp == NE_EXPR || cmp == EQ_EXPR)
      && TREE_CODE (rhs) == INTEGER_CST
      && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
    {
      if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
        {
          cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
          rhs = wide_int_to_tree (TREE_TYPE (rhs),
                                  wi::min_value (TREE_TYPE (rhs)) + 1);
        }
      else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
        {
          cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
          rhs = wide_int_to_tree (TREE_TYPE (rhs),
                                  wi::max_value (TREE_TYPE (rhs)) - 1);
        }
    }

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  tree alt_smaller = NULL_TREE;
  tree alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = rhs;
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
         Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
        {
          if (cmp == LT_EXPR)
            {
              wi::overflow_type overflow;
              wide_int alt = wi::sub (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
          else
            {
              wi::overflow_type overflow;
              wide_int alt = wi::add (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
        }
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = rhs;
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
         Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
        {
          wi::overflow_type overflow;
          if (cmp == GT_EXPR)
            {
              wide_int alt = wi::add (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
          else
            {
              wide_int alt = wi::sub (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
        }
    }
  else
    return false;

  /* Handle the special case of (signed_type)x < 0 being equivalent
     to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
     to x <= MAX_VAL(signed_type).  */
  if ((cmp == GE_EXPR || cmp == LT_EXPR)
      && INTEGRAL_TYPE_P (type)
      && TYPE_UNSIGNED (type)
      && integer_zerop (rhs))
    {
      tree op = gimple_cond_lhs (cond);
      if (TREE_CODE (op) == SSA_NAME
          && INTEGRAL_TYPE_P (TREE_TYPE (op))
          && !TYPE_UNSIGNED (TREE_TYPE (op)))
        {
          gimple *def_stmt = SSA_NAME_DEF_STMT (op);
          if (gimple_assign_cast_p (def_stmt))
            {
              tree op1 = gimple_assign_rhs1 (def_stmt);
              if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
                  && TYPE_UNSIGNED (TREE_TYPE (op1))
                  && (TYPE_PRECISION (TREE_TYPE (op))
                      == TYPE_PRECISION (TREE_TYPE (op1)))
                  && useless_type_conversion_p (type, TREE_TYPE (op1)))
                {
                  wide_int w1 = wi::max_value (TREE_TYPE (op));
                  wide_int w2 = wi::add (w1, 1);
                  if (cmp == LT_EXPR)
                    {
                      larger = op1;
                      smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
                      alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
                      alt_larger = NULL_TREE;
                    }
                  else
                    {
                      smaller = op1;
                      larger = wide_int_to_tree (TREE_TYPE (op1), w1);
                      alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
                      alt_smaller = NULL_TREE;
                    }
                }
            }
        }
    }

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
           || (alt_smaller
               && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
          && (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
        {
          /* Case

             if (smaller < larger)
               rslt = smaller;
             else
               rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
                || (alt_smaller
                    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
               && (operand_equal_for_phi_arg_p (arg_true, larger)
                   || (alt_larger
                       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger)
                  || (alt_larger
                      && operand_equal_for_phi_arg_p (op0, alt_larger)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger)
                       || (alt_larger
                           && operand_equal_for_phi_arg_p (op1, alt_larger)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
                                                          SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  gimple_seq stmts = NULL;
  tree phi_result = PHI_RESULT (phi);
  result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
  /* Duplicate range info if we're the only thing setting the target PHI.  */
  if (!gimple_seq_empty_p (stmts)
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && !POINTER_TYPE_P (TREE_TYPE (phi_result))
      && SSA_NAME_RANGE_INFO (phi_result))
    duplicate_ssa_name_range_info (result, SSA_NAME_RANGE_TYPE (phi_result),
                                   SSA_NAME_RANGE_INFO (phi_result));

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}

/* Convert

   <bb 2>
   if (b_4(D) != 0)
     goto <bb 3>
   else
     goto <bb 4>

   <bb 3>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <0(2), _9(3)>

   Into
   <bb 2>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <_9(2)>

1817 Similarly for __builtin_clz or __builtin_ctz if
1818 C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
1819 instead of 0 above it uses the value from that macro. */
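
/* A source-level sketch of the popcount case (the function name is
   hypothetical):

     int f (unsigned long b) { return b ? __builtin_popcountl (b) : 0; }

   which becomes an unconditional __builtin_popcountl (b), since
   popcount of zero is zero. */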
1820
1821 static bool
1822 cond_removal_in_popcount_clz_ctz_pattern (basic_block cond_bb,
1823 basic_block middle_bb,
1824 edge e1, edge e2, gimple *phi,
1825 tree arg0, tree arg1)
1826 {
1827 gimple *cond;
1828 gimple_stmt_iterator gsi, gsi_from;
1829 gimple *call;
1830 gimple *cast = NULL;
1831 tree lhs, arg;
1832
1833 /* Check that
1834 _2 = (unsigned long) b_4(D);
1835 _9 = __builtin_popcountl (_2);
1836 OR
1837 _9 = __builtin_popcountl (b_4(D));
1838 are the only stmts in the middle_bb. */
1839
1840 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1841 if (gsi_end_p (gsi))
1842 return false;
1843 cast = gsi_stmt (gsi);
1844 gsi_next_nondebug (&gsi);
1845 if (!gsi_end_p (gsi))
1846 {
1847 call = gsi_stmt (gsi);
1848 gsi_next_nondebug (&gsi);
1849 if (!gsi_end_p (gsi))
1850 return false;
1851 }
1852 else
1853 {
1854 call = cast;
1855 cast = NULL;
1856 }
1857
1858 /* Check that we have a popcount/clz/ctz builtin. */
1859 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
1860 return false;
1861
1862 arg = gimple_call_arg (call, 0);
1863 lhs = gimple_get_lhs (call);
1864
1865 if (lhs == NULL_TREE)
1866 return false;
1867
1868 combined_fn cfn = gimple_call_combined_fn (call);
1869 internal_fn ifn = IFN_LAST;
1870 int val = 0;
1871 switch (cfn)
1872 {
1873 CASE_CFN_POPCOUNT:
1874 break;
1875 CASE_CFN_CLZ:
1876 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1877 {
1878 tree type = TREE_TYPE (arg);
1879 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
1880 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1881 val) == 2)
1882 {
1883 ifn = IFN_CLZ;
1884 break;
1885 }
1886 }
1887 return false;
1888 CASE_CFN_CTZ:
1889 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1890 {
1891 tree type = TREE_TYPE (arg);
1892 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
1893 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1894 val) == 2)
1895 {
1896 ifn = IFN_CTZ;
1897 break;
1898 }
1899 }
1900 return false;
1901 default:
1902 return false;
1903 }
1904
1905 if (cast)
1906 {
1907 /* We have a stmt feeding the popcount/clz/ctz builtin;
1908 check that it really is a cast. */
1909 if (gimple_code (cast) != GIMPLE_ASSIGN
1910 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
1911 return false;
1912 /* Result of the cast stmt is the argument to the builtin. */
1913 if (arg != gimple_assign_lhs (cast))
1914 return false;
1915 arg = gimple_assign_rhs1 (cast);
1916 }
1917
1918 cond = last_stmt (cond_bb);
1919
1920 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
1921 builtin. */
1922 if (gimple_code (cond) != GIMPLE_COND
1923 || (gimple_cond_code (cond) != NE_EXPR
1924 && gimple_cond_code (cond) != EQ_EXPR)
1925 || !integer_zerop (gimple_cond_rhs (cond))
1926 || arg != gimple_cond_lhs (cond))
1927 return false;
1928
1929 /* Canonicalize. */
1930 if ((e2->flags & EDGE_TRUE_VALUE
1931 && gimple_cond_code (cond) == NE_EXPR)
1932 || (e1->flags & EDGE_TRUE_VALUE
1933 && gimple_cond_code (cond) == EQ_EXPR))
1934 {
1935 std::swap (arg0, arg1);
1936 std::swap (e1, e2);
1937 }
1938
1939 /* Check PHI arguments. */
1940 if (lhs != arg0
1941 || TREE_CODE (arg1) != INTEGER_CST
1942 || wi::to_wide (arg1) != val)
1943 return false;
1944
1945 /* And insert the popcount/clz/ctz builtin and cast stmt before the
1946 condition in cond_bb. */
1947 gsi = gsi_last_bb (cond_bb);
1948 if (cast)
1949 {
1950 gsi_from = gsi_for_stmt (cast);
1951 gsi_move_before (&gsi_from, &gsi);
1952 reset_flow_sensitive_info (gimple_get_lhs (cast));
1953 }
1954 gsi_from = gsi_for_stmt (call);
1955 if (ifn == IFN_LAST || gimple_call_internal_p (call))
1956 gsi_move_before (&gsi_from, &gsi);
1957 else
1958 {
1959 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
1960 the latter is well defined at zero. */
1961 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
1962 gimple_call_set_lhs (call, lhs);
1963 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
1964 gsi_remove (&gsi_from, true);
1965 }
1966 reset_flow_sensitive_info (lhs);
1967
1968 /* Now update the PHI and remove unneeded bbs. */
1969 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
1970 return true;
1971 }
1972
1973 /* The function abs_replacement does the main work of doing the absolute
1974 replacement. Return true if the replacement is done. Otherwise return
1975 false.
1976 COND_BB is the basic block where the replacement is going to be done. ARG0
1977 is argument 0 from the phi. Likewise for ARG1. */
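
/* A source-level sketch of the idiom abs_replacement matches
   (variable names are invented):

     x = (a >= 0) ? a : -a;

   which becomes x = ABS_EXPR <a> (or a negated ABS_EXPR for the
   inverted forms), subject to the checks below. */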
1978
1979 static bool
1980 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1981 edge e0 ATTRIBUTE_UNUSED, edge e1,
1982 gimple *phi, tree arg0, tree arg1)
1983 {
1984 tree result;
1985 gassign *new_stmt;
1986 gimple *cond;
1987 gimple_stmt_iterator gsi;
1988 edge true_edge, false_edge;
1989 gimple *assign;
1990 edge e;
1991 tree rhs, lhs;
1992 bool negate;
1993 enum tree_code cond_code;
1994
1995 /* If the type says honor signed zeros we cannot do this
1996 optimization. */
1997 if (HONOR_SIGNED_ZEROS (arg1))
1998 return false;
1999
2000 /* MIDDLE_BB must have only one executable statement which must have the
2001 form arg0 = -arg1 or arg1 = -arg0. */
2002
2003 assign = last_and_only_stmt (middle_bb);
2004 /* If we did not find the proper negation assignment, then we cannot
2005 optimize. */
2006 if (assign == NULL)
2007 return false;
2008
2009 /* If we got here, then we have found the only executable statement
2010 in MIDDLE_BB. If it is anything other than arg0 = -arg1 or
2011 arg1 = -arg0, then we cannot optimize. */
2012 if (gimple_code (assign) != GIMPLE_ASSIGN)
2013 return false;
2014
2015 lhs = gimple_assign_lhs (assign);
2016
2017 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
2018 return false;
2019
2020 rhs = gimple_assign_rhs1 (assign);
2021
2022 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
2023 if (!(lhs == arg0 && rhs == arg1)
2024 && !(lhs == arg1 && rhs == arg0))
2025 return false;
2026
2027 cond = last_stmt (cond_bb);
2028 result = PHI_RESULT (phi);
2029
2030 /* Only relationals comparing arg[01] against zero are interesting. */
2031 cond_code = gimple_cond_code (cond);
2032 if (cond_code != GT_EXPR && cond_code != GE_EXPR
2033 && cond_code != LT_EXPR && cond_code != LE_EXPR)
2034 return false;
2035
2036 /* Make sure the conditional is arg[01] OP y. */
2037 if (gimple_cond_lhs (cond) != rhs)
2038 return false;
2039
2040 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
2041 ? real_zerop (gimple_cond_rhs (cond))
2042 : integer_zerop (gimple_cond_rhs (cond)))
2043 ;
2044 else
2045 return false;
2046
2047 /* We need to know which is the true edge and which is the false
2048 edge so that we know whether we have abs or negative abs. */
2049 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
2050
2051 /* For GT_EXPR/GE_EXPR, if the true edge goes to MIDDLE_BB, then we
2052 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
2053 the false edge goes to MIDDLE_BB. */
2054 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
2055 e = true_edge;
2056 else
2057 e = false_edge;
2058
2059 if (e->dest == middle_bb)
2060 negate = true;
2061 else
2062 negate = false;
2063
2064 /* If the code negates only iff positive then make sure to not
2065 introduce undefined behavior when negating or computing the absolute.
2066 ??? We could use range info if present to check for arg1 == INT_MIN. */
2067 if (negate
2068 && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
2069 && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
2070 return false;
2071
2072 result = duplicate_ssa_name (result, NULL);
2073
2074 if (negate)
2075 lhs = make_ssa_name (TREE_TYPE (result));
2076 else
2077 lhs = result;
2078
2079 /* Build the modify expression with abs expression. */
2080 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
2081
2082 gsi = gsi_last_bb (cond_bb);
2083 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2084
2085 if (negate)
2086 {
2087 /* Get the right GSI. We want to insert after the recently
2088 added ABS_EXPR statement (which we know is the first statement
2089 in the block). */
2090 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
2091
2092 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2093 }
2094
2095 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2096
2097 /* Note that we optimized this PHI. */
2098 return true;
2099 }
2100
2101 /* Auxiliary functions to determine the set of memory accesses which
2102 can't trap because they are preceded by accesses to the same memory
2103 portion. We do that for MEM_REFs, so we only need to track
2104 the SSA_NAME of the pointer indirectly referenced. The algorithm
2105 is simply a walk over all instructions in dominator order. When
2106 we see a MEM_REF we determine if we've already seen the same
2107 ref anywhere up to the root of the dominator tree. If we have, the
2108 current access can't trap. If we don't see any dominating access,
2109 the current access might trap, but might also make later accesses
2110 non-trapping, so we remember it. We need to be careful with loads
2111 and stores: for instance a load might not trap, while a store would,
2112 so if we see a dominating read access this doesn't mean that a later
2113 write access would not trap. Hence we also need to differentiate the
2114 type of access(es) seen.
2115
2116 ??? We currently are very conservative and assume that a load might
2117 trap even if a store doesn't (write-only memory). This probably is
2118 overly conservative.
2119
2120 As a special case, for !TREE_ADDRESSABLE automatic variables we can
2121 ignore whether an access is a load or a store, because the local
2122 stack is always writable. */
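
/* A small sketch of what the walk establishes (a hypothetical
   example):

     *p = 1;        // may trap; remembered as a store through p
     if (cond)
       *p = 2;      // cannot trap: dominated by the store above

   Note that a dominating load of *p alone would not prove the
   second store non-trapping. */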
2123
2124 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
2125 basic block an *_REF through it was seen, which would constitute a
2126 no-trap region for same accesses.
2127
2128 Size is needed to support 2 MEM_REFs of different types, like
2129 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2130 OEP_ADDRESS_OF. */
2131 struct ref_to_bb
2132 {
2133 tree exp;
2134 HOST_WIDE_INT size;
2135 unsigned int phase;
2136 basic_block bb;
2137 };
2138
2139 /* Hashtable helpers. */
2140
2141 struct refs_hasher : free_ptr_hash<ref_to_bb>
2142 {
2143 static inline hashval_t hash (const ref_to_bb *);
2144 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2145 };
2146
2147 /* Used for quick clearing of the hash-table when we see calls.
2148 Hash entries with phase < nt_call_phase are invalid. */
2149 static unsigned int nt_call_phase;
2150
2151 /* The hash function. */
2152
2153 inline hashval_t
2154 refs_hasher::hash (const ref_to_bb *n)
2155 {
2156 inchash::hash hstate;
2157 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2158 hstate.add_hwi (n->size);
2159 return hstate.end ();
2160 }
2161
2162 /* The equality function of *N1 and *N2. */
2163
2164 inline bool
2165 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2166 {
2167 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2168 && n1->size == n2->size;
2169 }
2170
2171 class nontrapping_dom_walker : public dom_walker
2172 {
2173 public:
2174 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2175 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2176 {}
2177
2178 virtual edge before_dom_children (basic_block);
2179 virtual void after_dom_children (basic_block);
2180
2181 private:
2182
2183 /* We see the expression EXP in basic block BB. If it's an interesting
2184 expression (a MEM_REF through an SSA_NAME), possibly insert the
2185 expression into the set NONTRAP or the hash table of seen expressions.
2186 STORE is true if this expression is on the LHS, otherwise it's on
2187 the RHS. */
2188 void add_or_mark_expr (basic_block, tree, bool);
2189
2190 hash_set<tree> *m_nontrapping;
2191
2192 /* The hash table for remembering what we've seen. */
2193 hash_table<refs_hasher> m_seen_refs;
2194 };
2195
2196 /* Called by walk_dominator_tree, when entering the block BB. */
2197 edge
2198 nontrapping_dom_walker::before_dom_children (basic_block bb)
2199 {
2200 edge e;
2201 edge_iterator ei;
2202 gimple_stmt_iterator gsi;
2203
2204 /* If we haven't seen all our predecessors, clear the hash-table. */
2205 FOR_EACH_EDGE (e, ei, bb->preds)
2206 if ((((size_t)e->src->aux) & 2) == 0)
2207 {
2208 nt_call_phase++;
2209 break;
2210 }
2211
2212 /* Mark this BB as being on the path to dominator root and as visited. */
2213 bb->aux = (void*)(1 | 2);
2214
2215 /* And walk the statements in order. */
2216 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2217 {
2218 gimple *stmt = gsi_stmt (gsi);
2219
2220 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2221 || (is_gimple_call (stmt)
2222 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2223 nt_call_phase++;
2224 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2225 {
2226 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2227 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2228 }
2229 }
2230 return NULL;
2231 }
2232
2233 /* Called by walk_dominator_tree, when basic block BB is exited. */
2234 void
2235 nontrapping_dom_walker::after_dom_children (basic_block bb)
2236 {
2237 /* This BB isn't on the path to dominator root anymore. */
2238 bb->aux = (void*)2;
2239 }
2240
2241 /* We see the expression EXP in basic block BB. If it's an interesting
2242 expression, one of:
2243 1) MEM_REF
2244 2) ARRAY_REF
2245 3) COMPONENT_REF
2246 then possibly insert the expression into the set NONTRAP or the hash table
2247 of seen expressions. STORE is true if this expression is on the LHS,
2248 otherwise it's on the RHS. */
2249 void
2250 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2251 {
2252 HOST_WIDE_INT size;
2253
2254 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2255 || TREE_CODE (exp) == COMPONENT_REF)
2256 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2257 {
2258 struct ref_to_bb map;
2259 ref_to_bb **slot;
2260 struct ref_to_bb *r2bb;
2261 basic_block found_bb = 0;
2262
2263 if (!store)
2264 {
2265 tree base = get_base_address (exp);
2266 /* Only record a LOAD of a local variable whose address is not taken, as
2267 the local stack is always writable. This allows cselim on a STORE
2268 with a dominating LOAD. */
2269 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2270 return;
2271 }
2272
2273 /* Try to find the last seen *_REF, which can trap. */
2274 map.exp = exp;
2275 map.size = size;
2276 slot = m_seen_refs.find_slot (&map, INSERT);
2277 r2bb = *slot;
2278 if (r2bb && r2bb->phase >= nt_call_phase)
2279 found_bb = r2bb->bb;
2280
2281 /* If we've found an earlier *_REF _and_ it dominates EXP
2282 (it's in a basic block on the path from us to the dominator root),
2283 then EXP can't trap. */
2284 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2285 {
2286 m_nontrapping->add (exp);
2287 }
2288 else
2289 {
2290 /* EXP might trap, so insert it into the hash table. */
2291 if (r2bb)
2292 {
2293 r2bb->phase = nt_call_phase;
2294 r2bb->bb = bb;
2295 }
2296 else
2297 {
2298 r2bb = XNEW (struct ref_to_bb);
2299 r2bb->phase = nt_call_phase;
2300 r2bb->bb = bb;
2301 r2bb->exp = exp;
2302 r2bb->size = size;
2303 *slot = r2bb;
2304 }
2305 }
2306 }
2307 }
2308
2309 /* This is the entry point for gathering non-trapping memory accesses.
2310 It will do a dominator walk over the whole function, and it will
2311 make use of the bb->aux pointers. It returns a set of trees
2312 (the *_REFs themselves) which can't trap. */
2313 static hash_set<tree> *
2314 get_non_trapping (void)
2315 {
2316 nt_call_phase = 0;
2317 hash_set<tree> *nontrap = new hash_set<tree>;
2318 /* We're going to do a dominator walk, so ensure that we have
2319 dominance information. */
2320 calculate_dominance_info (CDI_DOMINATORS);
2321
2322 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2323 .walk (cfun->cfg->x_entry_block_ptr);
2324
2325 clear_aux_for_blocks ();
2326 return nontrap;
2327 }
2328
2329 /* Do the main work of conditional store replacement. We already know
2330 that the recognized pattern looks like this:
2331
2332 split:
2333 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2334 MIDDLE_BB:
2335 something
2336 fallthrough (edge E0)
2337 JOIN_BB:
2338 some more
2339
2340 We check that MIDDLE_BB contains only one store, that that store
2341 doesn't trap (not via NOTRAP, but via checking if an access to the same
2342 memory location dominates us, or the store is to a local addressable
2343 object) and that the store has a "simple" RHS. */
2344
2345 static bool
2346 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
2347 edge e0, edge e1, hash_set<tree> *nontrap)
2348 {
2349 gimple *assign = last_and_only_stmt (middle_bb);
2350 tree lhs, rhs, name, name2;
2351 gphi *newphi;
2352 gassign *new_stmt;
2353 gimple_stmt_iterator gsi;
2354 location_t locus;
2355
2356 /* Check if middle_bb contains only one store. */
2357 if (!assign
2358 || !gimple_assign_single_p (assign)
2359 || gimple_has_volatile_ops (assign))
2360 return false;
2361
2362 /* And no PHI nodes, so all uses in the single stmt are also
2363 available at the point where we insert. */
2364 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2365 return false;
2366
2367 locus = gimple_location (assign);
2368 lhs = gimple_assign_lhs (assign);
2369 rhs = gimple_assign_rhs1 (assign);
2370 if ((TREE_CODE (lhs) != MEM_REF
2371 && TREE_CODE (lhs) != ARRAY_REF
2372 && TREE_CODE (lhs) != COMPONENT_REF)
2373 || !is_gimple_reg_type (TREE_TYPE (lhs)))
2374 return false;
2375
2376 /* Prove that we can move the store down. We could also check
2377 TREE_THIS_NOTRAP here, but then we could also move stores whose
2378 value is not readily available, which we want to avoid. */
2379 if (!nontrap->contains (lhs))
2380 {
2381 /* If LHS is an access to a local variable whose address is not taken
2382 (or when we allow data races) and known not to trap, we could
2383 always safely move down the store. */
2384 tree base = get_base_address (lhs);
2385 if (!auto_var_p (base)
2386 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
2387 || tree_could_trap_p (lhs))
2388 return false;
2389 }
2390
2391 /* Now we've checked the constraints, so do the transformation:
2392 1) Remove the single store. */
2393 gsi = gsi_for_stmt (assign);
2394 unlink_stmt_vdef (assign);
2395 gsi_remove (&gsi, true);
2396 release_defs (assign);
2397
2398 /* Make both store and load use alias-set zero as we have to
2399 deal with the case of the store being a conditional change
2400 of the dynamic type. */
2401 lhs = unshare_expr (lhs);
2402 tree *basep = &lhs;
2403 while (handled_component_p (*basep))
2404 basep = &TREE_OPERAND (*basep, 0);
2405 if (TREE_CODE (*basep) == MEM_REF
2406 || TREE_CODE (*basep) == TARGET_MEM_REF)
2407 TREE_OPERAND (*basep, 1)
2408 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
2409 else
2410 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
2411 build_fold_addr_expr (*basep),
2412 build_zero_cst (ptr_type_node));
2413
2414 /* 2) Insert a load from the stored memory location into a temporary
2415 on the edge which did not contain the store. */
2416 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2417 new_stmt = gimple_build_assign (name, lhs);
2418 gimple_set_location (new_stmt, locus);
2419 lhs = unshare_expr (lhs);
2420 /* Set TREE_NO_WARNING on the rhs of the load to avoid uninit
2421 warnings. */
2422 TREE_NO_WARNING (gimple_assign_rhs1 (new_stmt)) = 1;
2423 gsi_insert_on_edge (e1, new_stmt);
2424
2425 /* 3) Create a PHI node at the join block, with one argument
2426 holding the old RHS, and the other holding the temporary
2427 where we stored the old memory contents. */
2428 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2429 newphi = create_phi_node (name2, join_bb);
2430 add_phi_arg (newphi, rhs, e0, locus);
2431 add_phi_arg (newphi, name, e1, locus);
2432
2433 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2434
2435 /* 4) Insert that PHI node. */
2436 gsi = gsi_after_labels (join_bb);
2437 if (gsi_end_p (gsi))
2438 {
2439 gsi = gsi_last_bb (join_bb);
2440 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2441 }
2442 else
2443 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2444
2445 if (dump_file && (dump_flags & TDF_DETAILS))
2446 {
2447 fprintf (dump_file, "\nConditional store replacement happened!");
2448 fprintf (dump_file, "\nReplaced the store with a load.");
2449 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
2450 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
2451 }
2452
2453 return true;
2454 }
2455
2456 /* Do the main work of conditional if-else store replacement. */
2457
2458 static bool
2459 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
2460 basic_block join_bb, gimple *then_assign,
2461 gimple *else_assign)
2462 {
2463 tree lhs_base, lhs, then_rhs, else_rhs, name;
2464 location_t then_locus, else_locus;
2465 gimple_stmt_iterator gsi;
2466 gphi *newphi;
2467 gassign *new_stmt;
2468
2469 if (then_assign == NULL
2470 || !gimple_assign_single_p (then_assign)
2471 || gimple_clobber_p (then_assign)
2472 || gimple_has_volatile_ops (then_assign)
2473 || else_assign == NULL
2474 || !gimple_assign_single_p (else_assign)
2475 || gimple_clobber_p (else_assign)
2476 || gimple_has_volatile_ops (else_assign))
2477 return false;
2478
2479 lhs = gimple_assign_lhs (then_assign);
2480 if (!is_gimple_reg_type (TREE_TYPE (lhs))
2481 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
2482 return false;
2483
2484 lhs_base = get_base_address (lhs);
2485 if (lhs_base == NULL_TREE
2486 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
2487 return false;
2488
2489 then_rhs = gimple_assign_rhs1 (then_assign);
2490 else_rhs = gimple_assign_rhs1 (else_assign);
2491 then_locus = gimple_location (then_assign);
2492 else_locus = gimple_location (else_assign);
2493
2494 /* Now we've checked the constraints, so do the transformation:
2495 1) Remove the stores. */
2496 gsi = gsi_for_stmt (then_assign);
2497 unlink_stmt_vdef (then_assign);
2498 gsi_remove (&gsi, true);
2499 release_defs (then_assign);
2500
2501 gsi = gsi_for_stmt (else_assign);
2502 unlink_stmt_vdef (else_assign);
2503 gsi_remove (&gsi, true);
2504 release_defs (else_assign);
2505
2506 /* 2) Create a PHI node at the join block, with one argument
2507 holding the RHS of the store from THEN_BB and the other
2508 holding the RHS of the store from ELSE_BB. */
2509 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2510 newphi = create_phi_node (name, join_bb);
2511 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
2512 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
2513
2514 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2515
2516 /* 3) Insert that PHI node. */
2517 gsi = gsi_after_labels (join_bb);
2518 if (gsi_end_p (gsi))
2519 {
2520 gsi = gsi_last_bb (join_bb);
2521 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2522 }
2523 else
2524 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2525
2526 return true;
2527 }
2528
2529 /* Return the single store in BB defining VDEF, or NULL if there are
2530 other stores in the BB or loads following the store. */
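
/* For instance (a hypothetical snippet), in

     *p = x;
     y = *q;

   the store is not a trailing store: the load that follows it might
   read the stored location, so the store cannot be sunk past it. */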
2531
2532 static gimple *
2533 single_trailing_store_in_bb (basic_block bb, tree vdef)
2534 {
2535 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
2536 return NULL;
2537 gimple *store = SSA_NAME_DEF_STMT (vdef);
2538 if (gimple_bb (store) != bb
2539 || gimple_code (store) == GIMPLE_PHI)
2540 return NULL;
2541
2542 /* Verify there is no other store in this BB. */
2543 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
2544 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
2545 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
2546 return NULL;
2547
2548 /* Verify there is no load or store after the store. */
2549 use_operand_p use_p;
2550 imm_use_iterator imm_iter;
2551 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
2552 if (USE_STMT (use_p) != store
2553 && gimple_bb (USE_STMT (use_p)) == bb)
2554 return NULL;
2555
2556 return store;
2557 }
2558
2559 /* Conditional store replacement. We already know
2560 that the recognized pattern looks like this:
2561
2562 split:
2563 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2564 THEN_BB:
2565 ...
2566 X = Y;
2567 ...
2568 goto JOIN_BB;
2569 ELSE_BB:
2570 ...
2571 X = Z;
2572 ...
2573 fallthrough (edge E0)
2574 JOIN_BB:
2575 some more
2576
2577 We check that it is safe to sink the store to JOIN_BB by verifying that
2578 there are no read-after-write or write-after-write dependencies in
2579 THEN_BB and ELSE_BB. */
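
/* A source-level sketch of the pattern (names are invented):

     if (cond)
       x->f = y;
     else
       x->f = z;

   Both stores are removed and a single store fed by a PHI <y, z>
   is emitted in JOIN_BB instead. */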
2580
2581 static bool
2582 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
2583 basic_block join_bb)
2584 {
2585 vec<data_reference_p> then_datarefs, else_datarefs;
2586 vec<ddr_p> then_ddrs, else_ddrs;
2587 gimple *then_store, *else_store;
2588 bool found, ok = false, res;
2589 struct data_dependence_relation *ddr;
2590 data_reference_p then_dr, else_dr;
2591 int i, j;
2592 tree then_lhs, else_lhs;
2593 basic_block blocks[3];
2594
2595 /* Handle the case with a single store in THEN_BB and ELSE_BB. That is
2596 cheap enough to always handle, as it allows us to elide the dependence
2597 checking. */
2598 gphi *vphi = NULL;
2599 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
2600 gsi_next (&si))
2601 if (virtual_operand_p (gimple_phi_result (si.phi ())))
2602 {
2603 vphi = si.phi ();
2604 break;
2605 }
2606 if (!vphi)
2607 return false;
2608 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
2609 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
2610 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
2611 if (then_assign)
2612 {
2613 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
2614 if (else_assign)
2615 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2616 then_assign, else_assign);
2617 }
2618
2619 /* If either vectorization or if-conversion is disabled then do
2620 not sink any stores. */
2621 if (param_max_stores_to_sink == 0
2622 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
2623 || !flag_tree_loop_if_convert)
2624 return false;
2625
2626 /* Find data references. */
2627 then_datarefs.create (1);
2628 else_datarefs.create (1);
2629 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
2630 == chrec_dont_know)
2631 || !then_datarefs.length ()
2632 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
2633 == chrec_dont_know)
2634 || !else_datarefs.length ())
2635 {
2636 free_data_refs (then_datarefs);
2637 free_data_refs (else_datarefs);
2638 return false;
2639 }
2640
2641 /* Find pairs of stores with equal LHS. */
2642 auto_vec<gimple *, 1> then_stores, else_stores;
2643 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
2644 {
2645 if (DR_IS_READ (then_dr))
2646 continue;
2647
2648 then_store = DR_STMT (then_dr);
2649 then_lhs = gimple_get_lhs (then_store);
2650 if (then_lhs == NULL_TREE)
2651 continue;
2652 found = false;
2653
2654 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
2655 {
2656 if (DR_IS_READ (else_dr))
2657 continue;
2658
2659 else_store = DR_STMT (else_dr);
2660 else_lhs = gimple_get_lhs (else_store);
2661 if (else_lhs == NULL_TREE)
2662 continue;
2663
2664 if (operand_equal_p (then_lhs, else_lhs, 0))
2665 {
2666 found = true;
2667 break;
2668 }
2669 }
2670
2671 if (!found)
2672 continue;
2673
2674 then_stores.safe_push (then_store);
2675 else_stores.safe_push (else_store);
2676 }
2677
2678 /* No pairs of stores found, or too many stores to sink. */
2679 if (!then_stores.length ()
2680 || then_stores.length () > (unsigned) param_max_stores_to_sink)
2681 {
2682 free_data_refs (then_datarefs);
2683 free_data_refs (else_datarefs);
2684 return false;
2685 }
2686
2687 /* Compute and check data dependencies in both basic blocks. */
2688 then_ddrs.create (1);
2689 else_ddrs.create (1);
2690 if (!compute_all_dependences (then_datarefs, &then_ddrs,
2691 vNULL, false)
2692 || !compute_all_dependences (else_datarefs, &else_ddrs,
2693 vNULL, false))
2694 {
2695 free_dependence_relations (then_ddrs);
2696 free_dependence_relations (else_ddrs);
2697 free_data_refs (then_datarefs);
2698 free_data_refs (else_datarefs);
2699 return false;
2700 }
2701 blocks[0] = then_bb;
2702 blocks[1] = else_bb;
2703 blocks[2] = join_bb;
2704 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2705
2706 /* Check that there are no read-after-write or write-after-write dependencies
2707 in THEN_BB. */
2708 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
2709 {
2710 struct data_reference *dra = DDR_A (ddr);
2711 struct data_reference *drb = DDR_B (ddr);
2712
2713 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2714 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2715 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2716 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2717 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2718 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2719 {
2720 free_dependence_relations (then_ddrs);
2721 free_dependence_relations (else_ddrs);
2722 free_data_refs (then_datarefs);
2723 free_data_refs (else_datarefs);
2724 return false;
2725 }
2726 }
2727
2728 /* Check that there are no read-after-write or write-after-write dependencies
2729 in ELSE_BB. */
2730 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2731 {
2732 struct data_reference *dra = DDR_A (ddr);
2733 struct data_reference *drb = DDR_B (ddr);
2734
2735 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2736 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2737 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2738 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2739 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2740 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2741 {
2742 free_dependence_relations (then_ddrs);
2743 free_dependence_relations (else_ddrs);
2744 free_data_refs (then_datarefs);
2745 free_data_refs (else_datarefs);
2746 return false;
2747 }
2748 }
2749
2750 /* Sink stores with the same LHS. */
2751 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2752 {
2753 else_store = else_stores[i];
2754 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2755 then_store, else_store);
2756 ok = ok || res;
2757 }
2758
2759 free_dependence_relations (then_ddrs);
2760 free_dependence_relations (else_ddrs);
2761 free_data_refs (then_datarefs);
2762 free_data_refs (else_datarefs);
2763
2764 return ok;
2765 }
2766
2767 /* Return TRUE if STMT has a VUSE whose defining statement is in BB. */
2768
2769 static bool
2770 local_mem_dependence (gimple *stmt, basic_block bb)
2771 {
2772 tree vuse = gimple_vuse (stmt);
2773 gimple *def;
2774
2775 if (!vuse)
2776 return false;
2777
2778 def = SSA_NAME_DEF_STMT (vuse);
2779 return (def && gimple_bb (def) == bb);
2780 }
2781
2782 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2783 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2784 and BB3 rejoins control flow following BB1 and BB2, look for
2785 opportunities to hoist loads as follows. If BB3 contains a PHI of
2786 two loads, one each occurring in BB1 and BB2, and the loads are
2787 provably of adjacent fields in the same structure, then move both
2788 loads into BB0. Of course this can only be done if there are no
2789 dependencies preventing such motion.
2790
2791 One of the hoisted loads will always be speculative, so the
2792 transformation is currently conservative:
2793
2794 - The fields must be strictly adjacent.
2795 - The two fields must occupy a single memory block that is
2796 guaranteed to not cross a page boundary.
2797
2798 The latter is difficult to prove, as such memory blocks should be
2799 aligned on the minimum of the stack alignment boundary and the
2800 alignment guaranteed by heap allocation interfaces. Thus we rely
2801 on a parameter for the alignment value.
2802
2803 Provided a good value is used for the alignment, the first
2804 restriction could possibly be relaxed. */
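
/* A sketch of the target idiom (the struct is hypothetical):

     struct node { struct node *left, *right; };
     x = cond ? n->left : n->right;

   left and right are adjacent fields of the same object, so both
   loads can be hoisted above the branch and the selection done with
   a conditional move. */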
2805
2806 static void
2807 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2808 basic_block bb2, basic_block bb3)
2809 {
2810 int param_align = param_l1_cache_line_size;
2811 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2812 gphi_iterator gsi;
2813
2814 /* Walk the phis in bb3 looking for an opportunity. We are looking
2815 for phis of two SSA names, one defined in bb1 and the other
2816 defined in bb2. */
2817 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2818 {
2819 gphi *phi_stmt = gsi.phi ();
2820 gimple *def1, *def2;
2821 tree arg1, arg2, ref1, ref2, field1, field2;
2822 tree tree_offset1, tree_offset2, tree_size2, next;
2823 int offset1, offset2, size2;
2824 unsigned align1;
2825 gimple_stmt_iterator gsi2;
2826 basic_block bb_for_def1, bb_for_def2;
2827
2828 if (gimple_phi_num_args (phi_stmt) != 2
2829 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2830 continue;
2831
2832 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2833 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2834
2835 if (TREE_CODE (arg1) != SSA_NAME
2836 || TREE_CODE (arg2) != SSA_NAME
2837 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2838 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2839 continue;
2840
2841 def1 = SSA_NAME_DEF_STMT (arg1);
2842 def2 = SSA_NAME_DEF_STMT (arg2);
2843
2844 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2845 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2846 continue;
2847
2848 /* Check the mode of the arguments to be sure a conditional move
2849 can be generated for them. */
2850 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2851 == CODE_FOR_nothing)
2852 continue;
2853
2854 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2855 if (!gimple_assign_single_p (def1)
2856 || !gimple_assign_single_p (def2)
2857 || gimple_has_volatile_ops (def1)
2858 || gimple_has_volatile_ops (def2))
2859 continue;
2860
2861 ref1 = gimple_assign_rhs1 (def1);
2862 ref2 = gimple_assign_rhs1 (def2);
2863
2864 if (TREE_CODE (ref1) != COMPONENT_REF
2865 || TREE_CODE (ref2) != COMPONENT_REF)
2866 continue;
2867
2868 /* The zeroth operand of the two component references must be
2869 identical. It is not sufficient to compare get_base_address of
2870 the two references, because this could allow for different
2871 elements of the same array in the two trees. It is not safe to
2872 assume that the existence of one array element implies the
2873 existence of a different one. */
2874 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2875 continue;
2876
2877 field1 = TREE_OPERAND (ref1, 1);
2878 field2 = TREE_OPERAND (ref2, 1);
2879
2880 /* Check for field adjacency, and ensure field1 comes first. */
2881 for (next = DECL_CHAIN (field1);
2882 next && TREE_CODE (next) != FIELD_DECL;
2883 next = DECL_CHAIN (next))
2884 ;
2885
2886 if (next != field2)
2887 {
2888 for (next = DECL_CHAIN (field2);
2889 next && TREE_CODE (next) != FIELD_DECL;
2890 next = DECL_CHAIN (next))
2891 ;
2892
2893 if (next != field1)
2894 continue;
2895
2896 std::swap (field1, field2);
2897 std::swap (def1, def2);
2898 }
2899
2900 bb_for_def1 = gimple_bb (def1);
2901 bb_for_def2 = gimple_bb (def2);
2902
2903 /* Check for proper alignment of the first field. */
2904 tree_offset1 = bit_position (field1);
2905 tree_offset2 = bit_position (field2);
2906 tree_size2 = DECL_SIZE (field2);
2907
2908 if (!tree_fits_uhwi_p (tree_offset1)
2909 || !tree_fits_uhwi_p (tree_offset2)
2910 || !tree_fits_uhwi_p (tree_size2))
2911 continue;
2912
2913 offset1 = tree_to_uhwi (tree_offset1);
2914 offset2 = tree_to_uhwi (tree_offset2);
2915 size2 = tree_to_uhwi (tree_size2);
2916 align1 = DECL_ALIGN (field1) % param_align_bits;
2917
2918 if (offset1 % BITS_PER_UNIT != 0)
2919 continue;
2920
2921 /* For profitability, the two field references should fit within
2922 a single cache line. */
2923 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2924 continue;
2925
2926 /* The two expressions cannot be dependent upon vdefs defined
2927 in bb1/bb2. */
2928 if (local_mem_dependence (def1, bb_for_def1)
2929 || local_mem_dependence (def2, bb_for_def2))
2930 continue;
2931
2932 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2933 bb0. We hoist the first one first so that a cache miss is handled
2934 efficiently regardless of hardware cache-fill policy. */
2935 gsi2 = gsi_for_stmt (def1);
2936 gsi_move_to_bb_end (&gsi2, bb0);
2937 gsi2 = gsi_for_stmt (def2);
2938 gsi_move_to_bb_end (&gsi2, bb0);
2939
2940 if (dump_file && (dump_flags & TDF_DETAILS))
2941 {
2942 fprintf (dump_file,
2943 "\nHoisting adjacent loads from %d and %d into %d: \n",
2944 bb_for_def1->index, bb_for_def2->index, bb0->index);
2945 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2946 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2947 }
2948 }
2949 }
2950
2951 /* Determine whether we should attempt to hoist adjacent loads out of
2952 diamond patterns in pass_phiopt. Always hoist loads if
2953 -fhoist-adjacent-loads is specified and the target machine has
2954 both a conditional move instruction and a defined cache line size. */
2955
2956 static bool
2957 gate_hoist_loads (void)
2958 {
2959 return (flag_hoist_adjacent_loads == 1
2960 && param_l1_cache_line_size
2961 && HAVE_conditional_move);
2962 }
2963
2964 /* This pass tries to replace an if-then-else block with an
2965 assignment. We have four kinds of transformations. Some of these
2966 transformations are also performed by the ifcvt RTL optimizer.
2967
2968 Conditional Replacement
2969 -----------------------
2970
2971 This transformation, implemented in conditional_replacement,
2972 replaces
2973
2974 bb0:
2975 if (cond) goto bb2; else goto bb1;
2976 bb1:
2977 bb2:
2978 x = PHI <0 (bb1), 1 (bb0), ...>;
2979
2980 with
2981
2982 bb0:
2983 x' = cond;
2984 goto bb2;
2985 bb2:
2986 x = PHI <x' (bb0), ...>;
2987
2988 We remove bb1 as it becomes unreachable. This occurs often due to
2989 gimplification of conditionals.
2990
2991 Value Replacement
2992 -----------------
2993
2994 This transformation, implemented in value_replacement, replaces
2995
2996 bb0:
2997 if (a != b) goto bb2; else goto bb1;
2998 bb1:
2999 bb2:
3000 x = PHI <a (bb1), b (bb0), ...>;
3001
3002 with
3003
3004 bb0:
3005 bb2:
3006 x = PHI <b (bb0), ...>;
3007
3008 This opportunity can sometimes occur as a result of other
3009 optimizations.
3010
3011
3012 Another case caught by value replacement looks like this:
3013
3014 bb0:
3015 t1 = a == CONST;
3016 t2 = b > c;
3017 t3 = t1 & t2;
3018 if (t3 != 0) goto bb1; else goto bb2;
3019 bb1:
3020 bb2:
3021 x = PHI <CONST, a>
3022
3023 Gets replaced with:
3024 bb0:
3025 bb2:
3026 t1 = a == CONST;
3027 t2 = b > c;
3028 t3 = t1 & t2;
3029 x = a;
3030
3031 ABS Replacement
3032 ---------------
3033
3034 This transformation, implemented in abs_replacement, replaces
3035
3036 bb0:
3037 if (a >= 0) goto bb2; else goto bb1;
3038 bb1:
3039 x = -a;
3040 bb2:
3041 x = PHI <x (bb1), a (bb0), ...>;
3042
3043 with
3044
3045 bb0:
3046 x' = ABS_EXPR< a >;
3047 bb2:
3048 x = PHI <x' (bb0), ...>;
3049
3050 MIN/MAX Replacement
3051 -------------------
3052
3053 This transformation, implemented in minmax_replacement, replaces
3054
3055 bb0:
3056 if (a <= b) goto bb2; else goto bb1;
3057 bb1:
3058 bb2:
3059 x = PHI <b (bb1), a (bb0), ...>;
3060
3061 with
3062
3063 bb0:
3064 x' = MIN_EXPR (a, b)
3065 bb2:
3066 x = PHI <x' (bb0), ...>;
3067
3068 A similar transformation is done for MAX_EXPR.
3069
3070
3071 This pass also performs a fifth transformation of a slightly different
3072 flavor.
3073
3074 Factor conversion in COND_EXPR
3075 ------------------------------
3076
3077 This transformation factors the conversion out of COND_EXPR with
3078 factor_out_conditional_conversion.
3079
3080 For example:
3081 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3082 <bb 3>:
3083 tmp = (int) a;
3084 <bb 4>:
3085 tmp = PHI <tmp, CST>
3086
3087 Into:
3088 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3089 <bb 3>:
3090 <bb 4>:
3091 a = PHI <a, CST>
3092 tmp = (int) a;
3093
3094 Adjacent Load Hoisting
3095 ----------------------
3096
3097 This transformation replaces
3098
3099 bb0:
3100 if (...) goto bb2; else goto bb1;
3101 bb1:
3102 x1 = (<expr>).field1;
3103 goto bb3;
3104 bb2:
3105 x2 = (<expr>).field2;
3106 bb3:
3107 # x = PHI <x1, x2>;
3108
3109 with
3110
3111 bb0:
3112 x1 = (<expr>).field1;
3113 x2 = (<expr>).field2;
3114 if (...) goto bb2; else goto bb1;
3115 bb1:
3116 goto bb3;
3117 bb2:
3118 bb3:
3119 # x = PHI <x1, x2>;
3120
3121 The purpose of this transformation is to enable generation of conditional
3122 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
3123 the loads is speculative, the transformation is restricted to very
3124 specific cases to avoid introducing a page fault. We are looking for
3125 the common idiom:
3126
3127 if (...)
3128 x = y->left;
3129 else
3130 x = y->right;
3131
3132 where left and right are typically adjacent pointers in a tree structure. */
3133
3134 namespace {
3135
3136 const pass_data pass_data_phiopt =
3137 {
3138 GIMPLE_PASS, /* type */
3139 "phiopt", /* name */
3140 OPTGROUP_NONE, /* optinfo_flags */
3141 TV_TREE_PHIOPT, /* tv_id */
3142 ( PROP_cfg | PROP_ssa ), /* properties_required */
3143 0, /* properties_provided */
3144 0, /* properties_destroyed */
3145 0, /* todo_flags_start */
3146 0, /* todo_flags_finish */
3147 };
3148
3149 class pass_phiopt : public gimple_opt_pass
3150 {
3151 public:
3152 pass_phiopt (gcc::context *ctxt)
3153 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3154 {}
3155
3156 /* opt_pass methods: */
3157 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3158 void set_pass_param (unsigned n, bool param)
3159 {
3160 gcc_assert (n == 0);
3161 early_p = param;
3162 }
3163 virtual bool gate (function *) { return flag_ssa_phiopt; }
3164 virtual unsigned int execute (function *)
3165 {
3166 return tree_ssa_phiopt_worker (false,
3167 !early_p ? gate_hoist_loads () : false,
3168 early_p);
3169 }
3170
3171 private:
3172 bool early_p;
3173 }; // class pass_phiopt
3174
3175 } // anon namespace
3176
3177 gimple_opt_pass *
3178 make_pass_phiopt (gcc::context *ctxt)
3179 {
3180 return new pass_phiopt (ctxt);
3181 }
3182
3183 namespace {
3184
3185 const pass_data pass_data_cselim =
3186 {
3187 GIMPLE_PASS, /* type */
3188 "cselim", /* name */
3189 OPTGROUP_NONE, /* optinfo_flags */
3190 TV_TREE_PHIOPT, /* tv_id */
3191 ( PROP_cfg | PROP_ssa ), /* properties_required */
3192 0, /* properties_provided */
3193 0, /* properties_destroyed */
3194 0, /* todo_flags_start */
3195 0, /* todo_flags_finish */
3196 };
3197
3198 class pass_cselim : public gimple_opt_pass
3199 {
3200 public:
3201 pass_cselim (gcc::context *ctxt)
3202 : gimple_opt_pass (pass_data_cselim, ctxt)
3203 {}
3204
3205 /* opt_pass methods: */
3206 virtual bool gate (function *) { return flag_tree_cselim; }
3207 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3208
3209 }; // class pass_cselim
3210
3211 } // anon namespace
3212
3213 gimple_opt_pass *
3214 make_pass_cselim (gcc::context *ctxt)
3215 {
3216 return new pass_cselim (ctxt);
3217 }