1 /* If-conversion support.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "cfghooks.h"
28 #include "df.h"
29 #include "tm_p.h"
30 #include "expmed.h"
31 #include "optabs.h"
32 #include "regs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35
36 #include "cfgrtl.h"
37 #include "cfganal.h"
38 #include "cfgcleanup.h"
39 #include "expr.h"
40 #include "output.h"
41 #include "cfgloop.h"
42 #include "tree-pass.h"
43 #include "dbgcnt.h"
44 #include "shrink-wrap.h"
45 #include "rtl-iter.h"
46 #include "ifcvt.h"
47
48 #ifndef MAX_CONDITIONAL_EXECUTE
49 #define MAX_CONDITIONAL_EXECUTE \
50 (BRANCH_COST (optimize_function_for_speed_p (cfun), false) \
51 + 1)
52 #endif
53
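/* For example, on a target where BRANCH_COST is 3 when optimizing for
   speed, the default above allows up to 4 insns to be conditionally
   executed in place of a branch (cond_exec_process_if_block doubles
   the limit when an ELSE block is present).  This is only the default;
   targets may override MAX_CONDITIONAL_EXECUTE.  */
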
54 #define IFCVT_MULTIPLE_DUMPS 1
55
56 #define NULL_BLOCK ((basic_block) NULL)
57
58 /* True if we are after the combine pass. */
59 static bool ifcvt_after_combine;
60
61 /* True if the target has the cbranchcc4 optab. */
62 static bool have_cbranchcc4;
63
64 /* # of IF-THEN or IF-THEN-ELSE blocks we looked at. */
65 static int num_possible_if_blocks;
66
67 /* # of IF-THEN or IF-THEN-ELSE blocks that were converted to conditional
68 execution. */
69 static int num_updated_if_blocks;
70
71 /* # of changes made. */
72 static int num_true_changes;
73
74 /* Whether conditional execution changes were made. */
75 static int cond_exec_changed_p;
76
77 /* Forward references. */
78 static int count_bb_insns (const_basic_block);
79 static bool cheap_bb_rtx_cost_p (const_basic_block, int, int);
80 static rtx_insn *first_active_insn (basic_block);
81 static rtx_insn *last_active_insn (basic_block, int);
82 static rtx_insn *find_active_insn_before (basic_block, rtx_insn *);
83 static rtx_insn *find_active_insn_after (basic_block, rtx_insn *);
84 static basic_block block_fallthru (basic_block);
85 static int cond_exec_process_insns (ce_if_block *, rtx_insn *, rtx, rtx, int,
86 int);
87 static rtx cond_exec_get_condition (rtx_insn *);
88 static rtx noce_get_condition (rtx_insn *, rtx_insn **, bool);
89 static int noce_operand_ok (const_rtx);
90 static void merge_if_block (ce_if_block *);
91 static int find_cond_trap (basic_block, edge, edge);
92 static basic_block find_if_header (basic_block, int);
93 static int block_jumps_and_fallthru_p (basic_block, basic_block);
94 static int noce_find_if_block (basic_block, edge, edge, int);
95 static int cond_exec_find_if_block (ce_if_block *);
96 static int find_if_case_1 (basic_block, edge, edge);
97 static int find_if_case_2 (basic_block, edge, edge);
98 static int dead_or_predicable (basic_block, basic_block, basic_block,
99 edge, int);
100 static void noce_emit_move_insn (rtx, rtx);
101 static rtx_insn *block_has_only_trap (basic_block);
102 \f
103 /* Count the number of non-jump active insns in BB. */
104
105 static int
106 count_bb_insns (const_basic_block bb)
107 {
108 int count = 0;
109 rtx_insn *insn = BB_HEAD (bb);
110
111 while (1)
112 {
113 if (active_insn_p (insn) && !JUMP_P (insn))
114 count++;
115
116 if (insn == BB_END (bb))
117 break;
118 insn = NEXT_INSN (insn);
119 }
120
121 return count;
122 }
123
124 /* Determine whether the total insn_rtx_cost on non-jump insns in
125 basic block BB is less than MAX_COST. This function returns
126 false if the cost of any instruction could not be estimated.
127
128 The cost of the non-jump insns in BB is scaled by REG_BR_PROB_BASE
129 as those insns are being speculated. MAX_COST is scaled with SCALE
130 plus a small fudge factor. */
131
132 static bool
133 cheap_bb_rtx_cost_p (const_basic_block bb, int scale, int max_cost)
134 {
135 int count = 0;
136 rtx_insn *insn = BB_HEAD (bb);
137 bool speed = optimize_bb_for_speed_p (bb);
138
139 /* Set scale to REG_BR_PROB_BASE to avoid the identical scaling
140 applied to insn_rtx_cost when optimizing for size. Only do
141 this after combine because if-conversion might interfere with
142 passes before combine.
143
144 Use optimize_function_for_speed_p instead of the pre-defined
145 variable speed to make sure it is set to same value for all
146 basic blocks in one if-conversion transformation. */
147 if (!optimize_function_for_speed_p (cfun) && ifcvt_after_combine)
148 scale = REG_BR_PROB_BASE;
149 /* Our branch probability/scaling factors are just estimates and don't
150 account for cases where we can get speculation for free and other
151 secondary benefits. So we fudge the scale factor to make speculating
152 appear a little more profitable when optimizing for performance. */
153 else
154 scale += REG_BR_PROB_BASE / 8;
155
156
157 max_cost *= scale;
158
159 while (1)
160 {
161 if (NONJUMP_INSN_P (insn))
162 {
163 int cost = insn_rtx_cost (PATTERN (insn), speed) * REG_BR_PROB_BASE;
164 if (cost == 0)
165 return false;
166
167 /* If this instruction is the load or set of a "stack" register,
168 such as a floating point register on x87, then the cost of
169 speculatively executing this insn may need to include
170 the additional cost of popping its result off of the
171 register stack. Unfortunately, correctly recognizing and
172 accounting for this additional overhead is tricky, so for
173 now we simply prohibit such speculative execution. */
174 #ifdef STACK_REGS
175 {
176 rtx set = single_set (insn);
177 if (set && STACK_REG_P (SET_DEST (set)))
178 return false;
179 }
180 #endif
181
182 count += cost;
183 if (count >= max_cost)
184 return false;
185 }
186 else if (CALL_P (insn))
187 return false;
188
189 if (insn == BB_END (bb))
190 break;
191 insn = NEXT_INSN (insn);
192 }
193
194 return true;
195 }
196
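/* A rough worked example of the scaling above, assuming the usual
   REG_BR_PROB_BASE of 10000: with SCALE == 5000 (the block executes
   half the time) and speed optimization, the limit becomes
   max_cost * (5000 + 1250), while each insn contributes
   insn_rtx_cost * 10000; the block therefore counts as cheap only
   while its raw cost stays below roughly 62% of MAX_COST.  */
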
197 /* Return the first non-jump active insn in the basic block. */
198
199 static rtx_insn *
200 first_active_insn (basic_block bb)
201 {
202 rtx_insn *insn = BB_HEAD (bb);
203
204 if (LABEL_P (insn))
205 {
206 if (insn == BB_END (bb))
207 return NULL;
208 insn = NEXT_INSN (insn);
209 }
210
211 while (NOTE_P (insn) || DEBUG_INSN_P (insn))
212 {
213 if (insn == BB_END (bb))
214 return NULL;
215 insn = NEXT_INSN (insn);
216 }
217
218 if (JUMP_P (insn))
219 return NULL;
220
221 return insn;
222 }
223
224 /* Return the last active non-jump insn in the basic block. */
225
226 static rtx_insn *
227 last_active_insn (basic_block bb, int skip_use_p)
228 {
229 rtx_insn *insn = BB_END (bb);
230 rtx_insn *head = BB_HEAD (bb);
231
232 while (NOTE_P (insn)
233 || JUMP_P (insn)
234 || DEBUG_INSN_P (insn)
235 || (skip_use_p
236 && NONJUMP_INSN_P (insn)
237 && GET_CODE (PATTERN (insn)) == USE))
238 {
239 if (insn == head)
240 return NULL;
241 insn = PREV_INSN (insn);
242 }
243
244 if (LABEL_P (insn))
245 return NULL;
246
247 return insn;
248 }
249
250 /* Return the active insn before INSN inside basic block CURR_BB. */
251
252 static rtx_insn *
253 find_active_insn_before (basic_block curr_bb, rtx_insn *insn)
254 {
255 if (!insn || insn == BB_HEAD (curr_bb))
256 return NULL;
257
258 while ((insn = PREV_INSN (insn)) != NULL_RTX)
259 {
260 if (NONJUMP_INSN_P (insn) || JUMP_P (insn) || CALL_P (insn))
261 break;
262
263 /* No other active insn all the way to the start of the basic block. */
264 if (insn == BB_HEAD (curr_bb))
265 return NULL;
266 }
267
268 return insn;
269 }
270
271 /* Return the active insn after INSN inside basic block CURR_BB. */
272
273 static rtx_insn *
274 find_active_insn_after (basic_block curr_bb, rtx_insn *insn)
275 {
276 if (!insn || insn == BB_END (curr_bb))
277 return NULL;
278
279 while ((insn = NEXT_INSN (insn)) != NULL_RTX)
280 {
281 if (NONJUMP_INSN_P (insn) || JUMP_P (insn) || CALL_P (insn))
282 break;
283
284 /* No other active insn all the way to the end of the basic block. */
285 if (insn == BB_END (curr_bb))
286 return NULL;
287 }
288
289 return insn;
290 }
291
292 /* Return the basic block reached by falling through the basic block BB. */
293
294 static basic_block
295 block_fallthru (basic_block bb)
296 {
297 edge e = find_fallthru_edge (bb->succs);
298
299 return (e) ? e->dest : NULL_BLOCK;
300 }
301
302 /* Return true if RTXs A and B can be safely interchanged. */
303
304 static bool
305 rtx_interchangeable_p (const_rtx a, const_rtx b)
306 {
307 if (!rtx_equal_p (a, b))
308 return false;
309
310 if (GET_CODE (a) != MEM)
311 return true;
312
313 /* A dead type-unsafe memory reference is legal, but a live type-unsafe memory
314 reference is not. Interchanging a dead type-unsafe memory reference with
315 a live type-safe one creates a live type-unsafe memory reference, in other
316 words, it makes the program illegal.
317 We check here conservatively whether the two memory references have equal
318 memory attributes. */
319
320 return mem_attrs_eq_p (get_mem_attrs (a), get_mem_attrs (b));
321 }
322
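/* For instance, two MEMs that are rtx_equal_p but reached through
   different union members may carry different alias-set attributes;
   mem_attrs_eq_p would reject such a pair, so we conservatively keep
   both references rather than substituting one for the other.  */
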
323 \f
324 /* Go through a bunch of insns, converting them to conditional
325 execution format if possible. Return TRUE if all of the non-note
326 insns were processed. */
327
328 static int
329 cond_exec_process_insns (ce_if_block *ce_info ATTRIBUTE_UNUSED,
330 /* if block information */rtx_insn *start,
331 /* first insn to look at */rtx end,
332 /* last insn to look at */rtx test,
333 /* conditional execution test */int prob_val,
334 /* probability of branch taken. */int mod_ok)
335 {
336 int must_be_last = FALSE;
337 rtx_insn *insn;
338 rtx xtest;
339 rtx pattern;
340
341 if (!start || !end)
342 return FALSE;
343
344 for (insn = start; ; insn = NEXT_INSN (insn))
345 {
346 /* dwarf2out can't cope with conditional prologues. */
347 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END)
348 return FALSE;
349
350 if (NOTE_P (insn) || DEBUG_INSN_P (insn))
351 goto insn_done;
352
353 gcc_assert (NONJUMP_INSN_P (insn) || CALL_P (insn));
354
355 /* dwarf2out can't cope with conditional unwind info. */
356 if (RTX_FRAME_RELATED_P (insn))
357 return FALSE;
358
359 /* Remove USE insns that get in the way. */
360 if (reload_completed && GET_CODE (PATTERN (insn)) == USE)
361 {
362 /* ??? Ug. Actually unlinking the thing is problematic,
363 given what we'd have to coordinate with our callers. */
364 SET_INSN_DELETED (insn);
365 goto insn_done;
366 }
367
368 /* Last insn wasn't last? */
369 if (must_be_last)
370 return FALSE;
371
372 if (modified_in_p (test, insn))
373 {
374 if (!mod_ok)
375 return FALSE;
376 must_be_last = TRUE;
377 }
378
379 /* Now build the conditional form of the instruction. */
380 pattern = PATTERN (insn);
381 xtest = copy_rtx (test);
382
383 /* If this is already a COND_EXEC, rewrite the test to be an AND of the
384 two conditions. */
385 if (GET_CODE (pattern) == COND_EXEC)
386 {
387 if (GET_MODE (xtest) != GET_MODE (COND_EXEC_TEST (pattern)))
388 return FALSE;
389
390 xtest = gen_rtx_AND (GET_MODE (xtest), xtest,
391 COND_EXEC_TEST (pattern));
392 pattern = COND_EXEC_CODE (pattern);
393 }
394
395 pattern = gen_rtx_COND_EXEC (VOIDmode, xtest, pattern);
396
397 /* If the machine needs to modify the insn being conditionally executed,
398 say for example to force a constant integer operand into a temp
399 register, do so here. */
400 #ifdef IFCVT_MODIFY_INSN
401 IFCVT_MODIFY_INSN (ce_info, pattern, insn);
402 if (! pattern)
403 return FALSE;
404 #endif
405
406 validate_change (insn, &PATTERN (insn), pattern, 1);
407
408 if (CALL_P (insn) && prob_val >= 0)
409 validate_change (insn, &REG_NOTES (insn),
410 gen_rtx_INT_LIST ((machine_mode) REG_BR_PROB,
411 prob_val, REG_NOTES (insn)), 1);
412
413 insn_done:
414 if (insn == end)
415 break;
416 }
417
418 return TRUE;
419 }
420
421 /* Return the condition for a jump. Do not do any special processing. */
422
423 static rtx
424 cond_exec_get_condition (rtx_insn *jump)
425 {
426 rtx test_if, cond;
427
428 if (any_condjump_p (jump))
429 test_if = SET_SRC (pc_set (jump));
430 else
431 return NULL_RTX;
432 cond = XEXP (test_if, 0);
433
434 /* If this branches to JUMP_LABEL when the condition is false,
435 reverse the condition. */
436 if (GET_CODE (XEXP (test_if, 2)) == LABEL_REF
437 && LABEL_REF_LABEL (XEXP (test_if, 2)) == JUMP_LABEL (jump))
438 {
439 enum rtx_code rev = reversed_comparison_code (cond, jump);
440 if (rev == UNKNOWN)
441 return NULL_RTX;
442
443 cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0),
444 XEXP (cond, 1));
445 }
446
447 return cond;
448 }
449
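/* For example, a jump whose pattern is

     (set (pc) (if_then_else (eq (reg) (const_int 0))
                             (pc)
                             (label_ref L)))

   branches to L when the EQ is false, so the condition returned for
   it is (ne (reg) (const_int 0)), provided the comparison is
   reversible.  */
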
450 /* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it
451 to conditional execution. Return TRUE if we were successful at
452 converting the block. */
453
454 static int
455 cond_exec_process_if_block (ce_if_block * ce_info,
456 /* if block information */int do_multiple_p)
457 {
458 basic_block test_bb = ce_info->test_bb; /* last test block */
459 basic_block then_bb = ce_info->then_bb; /* THEN */
460 basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */
461 rtx test_expr; /* expression in IF_THEN_ELSE that is tested */
462 rtx_insn *then_start; /* first insn in THEN block */
463 rtx_insn *then_end; /* last insn + 1 in THEN block */
464 rtx_insn *else_start = NULL; /* first insn in ELSE block or NULL */
465 rtx_insn *else_end = NULL; /* last insn + 1 in ELSE block */
466 int max; /* max # of insns to convert. */
467 int then_mod_ok; /* whether conditional mods are ok in THEN */
468 rtx true_expr; /* test for else block insns */
469 rtx false_expr; /* test for then block insns */
470 int true_prob_val; /* probability of else block */
471 int false_prob_val; /* probability of then block */
472 rtx_insn *then_last_head = NULL; /* Last match at the head of THEN */
473 rtx_insn *else_last_head = NULL; /* Last match at the head of ELSE */
474 rtx_insn *then_first_tail = NULL; /* First match at the tail of THEN */
475 rtx_insn *else_first_tail = NULL; /* First match at the tail of ELSE */
476 int then_n_insns, else_n_insns, n_insns;
477 enum rtx_code false_code;
478 rtx note;
479
480 /* If the test consists of && or || elements, and we've failed at handling
481 all of them together, just use the last test if it is the special case of
482 && elements without an ELSE block. */
483 if (!do_multiple_p && ce_info->num_multiple_test_blocks)
484 {
485 if (else_bb || ! ce_info->and_and_p)
486 return FALSE;
487
488 ce_info->test_bb = test_bb = ce_info->last_test_bb;
489 ce_info->num_multiple_test_blocks = 0;
490 ce_info->num_and_and_blocks = 0;
491 ce_info->num_or_or_blocks = 0;
492 }
493
494 /* Find the conditional jump to the ELSE or JOIN part, and isolate
495 the test. */
496 test_expr = cond_exec_get_condition (BB_END (test_bb));
497 if (! test_expr)
498 return FALSE;
499
500 /* If the conditional jump is more than just a conditional jump,
501 then we cannot do conditional execution conversion on this block. */
502 if (! onlyjump_p (BB_END (test_bb)))
503 return FALSE;
504
505 /* Collect the bounds of where we're to search, skipping any labels, jumps
506 and notes at the beginning and end of the block. Then count the total
507 number of insns and see if it is small enough to convert. */
508 then_start = first_active_insn (then_bb);
509 then_end = last_active_insn (then_bb, TRUE);
510 then_n_insns = ce_info->num_then_insns = count_bb_insns (then_bb);
511 n_insns = then_n_insns;
512 max = MAX_CONDITIONAL_EXECUTE;
513
514 if (else_bb)
515 {
516 int n_matching;
517
518 max *= 2;
519 else_start = first_active_insn (else_bb);
520 else_end = last_active_insn (else_bb, TRUE);
521 else_n_insns = ce_info->num_else_insns = count_bb_insns (else_bb);
522 n_insns += else_n_insns;
523
524 /* Look for matching sequences at the head and tail of the two blocks,
525 and limit the range of insns to be converted if possible. */
526 n_matching = flow_find_cross_jump (then_bb, else_bb,
527 &then_first_tail, &else_first_tail,
528 NULL);
529 if (then_first_tail == BB_HEAD (then_bb))
530 then_start = then_end = NULL;
531 if (else_first_tail == BB_HEAD (else_bb))
532 else_start = else_end = NULL;
533
534 if (n_matching > 0)
535 {
536 if (then_end)
537 then_end = find_active_insn_before (then_bb, then_first_tail);
538 if (else_end)
539 else_end = find_active_insn_before (else_bb, else_first_tail);
540 n_insns -= 2 * n_matching;
541 }
542
543 if (then_start
544 && else_start
545 && then_n_insns > n_matching
546 && else_n_insns > n_matching)
547 {
548 int longest_match = MIN (then_n_insns - n_matching,
549 else_n_insns - n_matching);
550 n_matching
551 = flow_find_head_matching_sequence (then_bb, else_bb,
552 &then_last_head,
553 &else_last_head,
554 longest_match);
555
556 if (n_matching > 0)
557 {
558 rtx_insn *insn;
559
560 /* We won't pass the insns in the head sequence to
561 cond_exec_process_insns, so we need to test them here
562 to make sure that they don't clobber the condition. */
563 for (insn = BB_HEAD (then_bb);
564 insn != NEXT_INSN (then_last_head);
565 insn = NEXT_INSN (insn))
566 if (!LABEL_P (insn) && !NOTE_P (insn)
567 && !DEBUG_INSN_P (insn)
568 && modified_in_p (test_expr, insn))
569 return FALSE;
570 }
571
572 if (then_last_head == then_end)
573 then_start = then_end = NULL;
574 if (else_last_head == else_end)
575 else_start = else_end = NULL;
576
577 if (n_matching > 0)
578 {
579 if (then_start)
580 then_start = find_active_insn_after (then_bb, then_last_head);
581 if (else_start)
582 else_start = find_active_insn_after (else_bb, else_last_head);
583 n_insns -= 2 * n_matching;
584 }
585 }
586 }
587
588 if (n_insns > max)
589 return FALSE;
590
591 /* Map test_expr/test_jump into the appropriate MD tests to use on
592 the conditionally executed code. */
593
594 true_expr = test_expr;
595
596 false_code = reversed_comparison_code (true_expr, BB_END (test_bb));
597 if (false_code != UNKNOWN)
598 false_expr = gen_rtx_fmt_ee (false_code, GET_MODE (true_expr),
599 XEXP (true_expr, 0), XEXP (true_expr, 1));
600 else
601 false_expr = NULL_RTX;
602
603 #ifdef IFCVT_MODIFY_TESTS
604 /* If the machine description needs to modify the tests, such as setting a
605 conditional execution register from a comparison, it can do so here. */
606 IFCVT_MODIFY_TESTS (ce_info, true_expr, false_expr);
607
608 /* See if the conversion failed. */
609 if (!true_expr || !false_expr)
610 goto fail;
611 #endif
612
613 note = find_reg_note (BB_END (test_bb), REG_BR_PROB, NULL_RTX);
614 if (note)
615 {
616 true_prob_val = XINT (note, 0);
617 false_prob_val = REG_BR_PROB_BASE - true_prob_val;
618 }
619 else
620 {
621 true_prob_val = -1;
622 false_prob_val = -1;
623 }
624
625 /* If we have && or || tests, do them here. These tests are in the adjacent
626 blocks after the first block containing the test. */
627 if (ce_info->num_multiple_test_blocks > 0)
628 {
629 basic_block bb = test_bb;
630 basic_block last_test_bb = ce_info->last_test_bb;
631
632 if (! false_expr)
633 goto fail;
634
635 do
636 {
637 rtx_insn *start, *end;
638 rtx t, f;
639 enum rtx_code f_code;
640
641 bb = block_fallthru (bb);
642 start = first_active_insn (bb);
643 end = last_active_insn (bb, TRUE);
644 if (start
645 && ! cond_exec_process_insns (ce_info, start, end, false_expr,
646 false_prob_val, FALSE))
647 goto fail;
648
649 /* If the conditional jump is more than just a conditional jump, then
650 we cannot do conditional execution conversion on this block. */
651 if (! onlyjump_p (BB_END (bb)))
652 goto fail;
653
654 /* Find the conditional jump and isolate the test. */
655 t = cond_exec_get_condition (BB_END (bb));
656 if (! t)
657 goto fail;
658
659 f_code = reversed_comparison_code (t, BB_END (bb));
660 if (f_code == UNKNOWN)
661 goto fail;
662
663 f = gen_rtx_fmt_ee (f_code, GET_MODE (t), XEXP (t, 0), XEXP (t, 1));
664 if (ce_info->and_and_p)
665 {
666 t = gen_rtx_AND (GET_MODE (t), true_expr, t);
667 f = gen_rtx_IOR (GET_MODE (t), false_expr, f);
668 }
669 else
670 {
671 t = gen_rtx_IOR (GET_MODE (t), true_expr, t);
672 f = gen_rtx_AND (GET_MODE (t), false_expr, f);
673 }
674
675 /* If the machine description needs to modify the tests, such as
676 setting a conditional execution register from a comparison, it can
677 do so here. */
678 #ifdef IFCVT_MODIFY_MULTIPLE_TESTS
679 IFCVT_MODIFY_MULTIPLE_TESTS (ce_info, bb, t, f);
680
681 /* See if the conversion failed. */
682 if (!t || !f)
683 goto fail;
684 #endif
685
686 true_expr = t;
687 false_expr = f;
688 }
689 while (bb != last_test_bb);
690 }
691
692 /* For IF-THEN-ELSE blocks, we don't allow modifications of the test
693 in the THEN block. */
694 then_mod_ok = (else_bb == NULL_BLOCK);
695
696 /* Go through the THEN and ELSE blocks converting the insns if possible
697 to conditional execution. */
698
699 if (then_end
700 && (! false_expr
701 || ! cond_exec_process_insns (ce_info, then_start, then_end,
702 false_expr, false_prob_val,
703 then_mod_ok)))
704 goto fail;
705
706 if (else_bb && else_end
707 && ! cond_exec_process_insns (ce_info, else_start, else_end,
708 true_expr, true_prob_val, TRUE))
709 goto fail;
710
711 /* If we cannot apply the changes, fail. Do not go through the normal fail
712 processing, since apply_change_group will call cancel_changes. */
713 if (! apply_change_group ())
714 {
715 #ifdef IFCVT_MODIFY_CANCEL
716 /* Cancel any machine dependent changes. */
717 IFCVT_MODIFY_CANCEL (ce_info);
718 #endif
719 return FALSE;
720 }
721
722 #ifdef IFCVT_MODIFY_FINAL
723 /* Do any machine dependent final modifications. */
724 IFCVT_MODIFY_FINAL (ce_info);
725 #endif
726
727 /* Conversion succeeded. */
728 if (dump_file)
729 fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
730 n_insns, (n_insns == 1) ? " was" : "s were");
731
732 /* Merge the blocks! If we had matching sequences, make sure to delete one
733 copy at the appropriate location first: delete the copy in the THEN branch
734 for a tail sequence so that the remaining one is executed last for both
735 branches, and delete the copy in the ELSE branch for a head sequence so
736 that the remaining one is executed first for both branches. */
737 if (then_first_tail)
738 {
739 rtx_insn *from = then_first_tail;
740 if (!INSN_P (from))
741 from = find_active_insn_after (then_bb, from);
742 delete_insn_chain (from, BB_END (then_bb), false);
743 }
744 if (else_last_head)
745 delete_insn_chain (first_active_insn (else_bb), else_last_head, false);
746
747 merge_if_block (ce_info);
748 cond_exec_changed_p = TRUE;
749 return TRUE;
750
751 fail:
752 #ifdef IFCVT_MODIFY_CANCEL
753 /* Cancel any machine dependent changes. */
754 IFCVT_MODIFY_CANCEL (ce_info);
755 #endif
756
757 cancel_changes (0);
758 return FALSE;
759 }
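
/* As a sketch of the overall effect, in hypothetical ARM-like syntax:

     cmp r0, #0            if (r0 == 0)
     bne L1                  r1 = r1 + 1;
     add r1, r1, #1        else
     b L2                    r2 = r2 - 1;
   L1: sub r2, r2, #1
   L2:

   becomes

     cmp r0, #0
     addeq r1, r1, #1
     subne r2, r2, #1

   with both arms predicated on the compare and the branches gone.  */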
760 \f
761 /* Used by noce_process_if_block to communicate with its subroutines.
762
763 The subroutines know that A and B may be evaluated freely. They
764 know that X is a register. They should insert new instructions
765 before cond_earliest. */
766
767 struct noce_if_info
768 {
769 /* The basic blocks that make up the IF-THEN-{ELSE-,}JOIN block. */
770 basic_block test_bb, then_bb, else_bb, join_bb;
771
772 /* The jump that ends TEST_BB. */
773 rtx_insn *jump;
774
775 /* The jump condition. */
776 rtx cond;
777
778 /* New insns should be inserted before this one. */
779 rtx_insn *cond_earliest;
780
781 /* Insns in the THEN and ELSE block. There is always just
782 one insn in those blocks. The insns are single_set insns.
783 If there was no ELSE block, INSN_B is the last insn before
784 COND_EARLIEST, or NULL_RTX. In the former case, the insn
785 operands are still valid, as if INSN_B was moved down below
786 the jump. */
787 rtx_insn *insn_a, *insn_b;
788
789 /* The SET_SRC of INSN_A and INSN_B. */
790 rtx a, b;
791
792 /* The SET_DEST of INSN_A. */
793 rtx x;
794
795 /* True if this if block is not canonical. In the canonical form of
796 if blocks, the THEN_BB is the block reached via the fallthru edge
797 from TEST_BB. For the noce transformations, we allow the symmetric
798 form as well. */
799 bool then_else_reversed;
800
801 /* True if the contents of then_bb and else_bb are a
802 simple single set instruction. */
803 bool then_simple;
804 bool else_simple;
805
806 /* The total rtx cost of the instructions in then_bb and else_bb. */
807 unsigned int then_cost;
808 unsigned int else_cost;
809
810 /* Estimated cost of the particular branch instruction. */
811 unsigned int branch_cost;
812 };
813
814 static rtx noce_emit_store_flag (struct noce_if_info *, rtx, int, int);
815 static int noce_try_move (struct noce_if_info *);
816 static int noce_try_store_flag (struct noce_if_info *);
817 static int noce_try_addcc (struct noce_if_info *);
818 static int noce_try_store_flag_constants (struct noce_if_info *);
819 static int noce_try_store_flag_mask (struct noce_if_info *);
820 static rtx noce_emit_cmove (struct noce_if_info *, rtx, enum rtx_code, rtx,
821 rtx, rtx, rtx);
822 static int noce_try_cmove (struct noce_if_info *);
823 static int noce_try_cmove_arith (struct noce_if_info *);
824 static rtx noce_get_alt_condition (struct noce_if_info *, rtx, rtx_insn **);
825 static int noce_try_minmax (struct noce_if_info *);
826 static int noce_try_abs (struct noce_if_info *);
827 static int noce_try_sign_mask (struct noce_if_info *);
828
829 /* Helper function for noce_try_store_flag*. */
830
831 static rtx
832 noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep,
833 int normalize)
834 {
835 rtx cond = if_info->cond;
836 int cond_complex;
837 enum rtx_code code;
838
839 cond_complex = (! general_operand (XEXP (cond, 0), VOIDmode)
840 || ! general_operand (XEXP (cond, 1), VOIDmode));
841
842 /* If earliest == jump, or when the condition is complex, try to
843 build the store_flag insn directly. */
844
845 if (cond_complex)
846 {
847 rtx set = pc_set (if_info->jump);
848 cond = XEXP (SET_SRC (set), 0);
849 if (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
850 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (if_info->jump))
851 reversep = !reversep;
852 if (if_info->then_else_reversed)
853 reversep = !reversep;
854 }
855
856 if (reversep)
857 code = reversed_comparison_code (cond, if_info->jump);
858 else
859 code = GET_CODE (cond);
860
861 if ((if_info->cond_earliest == if_info->jump || cond_complex)
862 && (normalize == 0 || STORE_FLAG_VALUE == normalize))
863 {
864 rtx src = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
865 XEXP (cond, 1));
866 rtx set = gen_rtx_SET (x, src);
867
868 start_sequence ();
869 rtx_insn *insn = emit_insn (set);
870
871 if (recog_memoized (insn) >= 0)
872 {
873 rtx_insn *seq = get_insns ();
874 end_sequence ();
875 emit_insn (seq);
876
877 if_info->cond_earliest = if_info->jump;
878
879 return x;
880 }
881
882 end_sequence ();
883 }
884
885 /* Don't even try if the comparison operands or the mode of X are weird. */
886 if (cond_complex || !SCALAR_INT_MODE_P (GET_MODE (x)))
887 return NULL_RTX;
888
889 return emit_store_flag (x, code, XEXP (cond, 0),
890 XEXP (cond, 1), VOIDmode,
891 (code == LTU || code == LEU
892 || code == GEU || code == GTU), normalize);
893 }
894
895 /* Emit instruction to move an rtx, possibly into STRICT_LOW_PART.
896 X is the destination/target and Y is the value to copy. */
897
898 static void
899 noce_emit_move_insn (rtx x, rtx y)
900 {
901 machine_mode outmode;
902 rtx outer, inner;
903 int bitpos;
904
905 if (GET_CODE (x) != STRICT_LOW_PART)
906 {
907 rtx_insn *seq, *insn;
908 rtx target;
909 optab ot;
910
911 start_sequence ();
912 /* Check that the SET_SRC is reasonable before calling emit_move_insn,
913 otherwise construct a suitable SET pattern ourselves. */
914 insn = (OBJECT_P (y) || CONSTANT_P (y) || GET_CODE (y) == SUBREG)
915 ? emit_move_insn (x, y)
916 : emit_insn (gen_rtx_SET (x, y));
917 seq = get_insns ();
918 end_sequence ();
919
920 if (recog_memoized (insn) <= 0)
921 {
922 if (GET_CODE (x) == ZERO_EXTRACT)
923 {
924 rtx op = XEXP (x, 0);
925 unsigned HOST_WIDE_INT size = INTVAL (XEXP (x, 1));
926 unsigned HOST_WIDE_INT start = INTVAL (XEXP (x, 2));
927
928 /* store_bit_field expects START to be relative to
929 BYTES_BIG_ENDIAN and adjusts this value for machines with
930 BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN. In order to be able to
931 invoke store_bit_field again it is necessary to have the START
932 value from the first call. */
933 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
934 {
935 if (MEM_P (op))
936 start = BITS_PER_UNIT - start - size;
937 else
938 {
939 gcc_assert (REG_P (op));
940 start = BITS_PER_WORD - start - size;
941 }
942 }
943
944 gcc_assert (start < (MEM_P (op) ? BITS_PER_UNIT : BITS_PER_WORD));
945 store_bit_field (op, size, start, 0, 0, GET_MODE (x), y);
946 return;
947 }
948
949 switch (GET_RTX_CLASS (GET_CODE (y)))
950 {
951 case RTX_UNARY:
952 ot = code_to_optab (GET_CODE (y));
953 if (ot)
954 {
955 start_sequence ();
956 target = expand_unop (GET_MODE (y), ot, XEXP (y, 0), x, 0);
957 if (target != NULL_RTX)
958 {
959 if (target != x)
960 emit_move_insn (x, target);
961 seq = get_insns ();
962 }
963 end_sequence ();
964 }
965 break;
966
967 case RTX_BIN_ARITH:
968 case RTX_COMM_ARITH:
969 ot = code_to_optab (GET_CODE (y));
970 if (ot)
971 {
972 start_sequence ();
973 target = expand_binop (GET_MODE (y), ot,
974 XEXP (y, 0), XEXP (y, 1),
975 x, 0, OPTAB_DIRECT);
976 if (target != NULL_RTX)
977 {
978 if (target != x)
979 emit_move_insn (x, target);
980 seq = get_insns ();
981 }
982 end_sequence ();
983 }
984 break;
985
986 default:
987 break;
988 }
989 }
990
991 emit_insn (seq);
992 return;
993 }
994
995 outer = XEXP (x, 0);
996 inner = XEXP (outer, 0);
997 outmode = GET_MODE (outer);
998 bitpos = SUBREG_BYTE (outer) * BITS_PER_UNIT;
999 store_bit_field (inner, GET_MODE_BITSIZE (outmode), bitpos,
1000 0, 0, outmode, y);
1001 }
1002
1003 /* Return the CC reg if it is used in COND. */
1004
1005 static rtx
1006 cc_in_cond (rtx cond)
1007 {
1008 if (have_cbranchcc4 && cond
1009 && GET_MODE_CLASS (GET_MODE (XEXP (cond, 0))) == MODE_CC)
1010 return XEXP (cond, 0);
1011
1012 return NULL_RTX;
1013 }
1014
1015 /* Return the sequence of instructions generated by if conversion. This
1016 function calls end_sequence() to end the current stream, and checks
1017 that the instructions are unshared, recognizable non-jump insns.
1018 On failure, this function returns NULL. */
1019
1020 static rtx_insn *
1021 end_ifcvt_sequence (struct noce_if_info *if_info)
1022 {
1023 rtx_insn *insn;
1024 rtx_insn *seq = get_insns ();
1025 rtx cc = cc_in_cond (if_info->cond);
1026
1027 set_used_flags (if_info->x);
1028 set_used_flags (if_info->cond);
1029 set_used_flags (if_info->a);
1030 set_used_flags (if_info->b);
1031
1032 for (insn = seq; insn; insn = NEXT_INSN (insn))
1033 set_used_flags (insn);
1034
1035 unshare_all_rtl_in_chain (seq);
1036 end_sequence ();
1037
1038 /* Make sure that all of the instructions emitted are recognizable,
1039 and that we haven't introduced a new jump instruction.
1040 As an exercise for the reader, build a general mechanism that
1041 allows proper placement of required clobbers. */
1042 for (insn = seq; insn; insn = NEXT_INSN (insn))
1043 if (JUMP_P (insn)
1044 || recog_memoized (insn) == -1
1045 /* Make sure new generated code does not clobber CC. */
1046 || (cc && set_of (cc, insn)))
1047 return NULL;
1048
1049 return seq;
1050 }
1051
1052 /* Return true iff the THEN basic block (and the ELSE basic block,
1053 if it exists) consist of a single simple set instruction. */
1054
1055 static bool
1056 noce_simple_bbs (struct noce_if_info *if_info)
1057 {
1058 if (!if_info->then_simple)
1059 return false;
1060
1061 if (if_info->else_bb)
1062 return if_info->else_simple;
1063
1064 return true;
1065 }
1066
1067 /* Convert "if (a != b) x = a; else x = b" into "x = a" and
1068 "if (a == b) x = a; else x = b" into "x = b". */
1069
1070 static int
1071 noce_try_move (struct noce_if_info *if_info)
1072 {
1073 rtx cond = if_info->cond;
1074 enum rtx_code code = GET_CODE (cond);
1075 rtx y;
1076 rtx_insn *seq;
1077
1078 if (code != NE && code != EQ)
1079 return FALSE;
1080
1081 if (!noce_simple_bbs (if_info))
1082 return FALSE;
1083
1084 /* This optimization isn't valid if either A or B could be a NaN
1085 or a signed zero. */
1086 if (HONOR_NANS (if_info->x)
1087 || HONOR_SIGNED_ZEROS (if_info->x))
1088 return FALSE;
1089
1090 /* Check whether the operands of the comparison are A and B in
1091 either order. */
1092 if ((rtx_equal_p (if_info->a, XEXP (cond, 0))
1093 && rtx_equal_p (if_info->b, XEXP (cond, 1)))
1094 || (rtx_equal_p (if_info->a, XEXP (cond, 1))
1095 && rtx_equal_p (if_info->b, XEXP (cond, 0))))
1096 {
1097 if (!rtx_interchangeable_p (if_info->a, if_info->b))
1098 return FALSE;
1099
1100 y = (code == EQ) ? if_info->a : if_info->b;
1101
1102 /* Avoid generating the move if the source is the destination. */
1103 if (! rtx_equal_p (if_info->x, y))
1104 {
1105 start_sequence ();
1106 noce_emit_move_insn (if_info->x, y);
1107 seq = end_ifcvt_sequence (if_info);
1108 if (!seq)
1109 return FALSE;
1110
1111 emit_insn_before_setloc (seq, if_info->jump,
1112 INSN_LOCATION (if_info->insn_a));
1113 }
1114 return TRUE;
1115 }
1116 return FALSE;
1117 }
1118
1119 /* Convert "if (test) x = 1; else x = 0".
1120
1121 Only try 0 and STORE_FLAG_VALUE here. Other combinations will be
1122 tried in noce_try_store_flag_constants after noce_try_cmove has had
1123 a go at the conversion. */
1124
1125 static int
1126 noce_try_store_flag (struct noce_if_info *if_info)
1127 {
1128 int reversep;
1129 rtx target;
1130 rtx_insn *seq;
1131
1132 if (!noce_simple_bbs (if_info))
1133 return FALSE;
1134
1135 if (CONST_INT_P (if_info->b)
1136 && INTVAL (if_info->b) == STORE_FLAG_VALUE
1137 && if_info->a == const0_rtx)
1138 reversep = 0;
1139 else if (if_info->b == const0_rtx
1140 && CONST_INT_P (if_info->a)
1141 && INTVAL (if_info->a) == STORE_FLAG_VALUE
1142 && (reversed_comparison_code (if_info->cond, if_info->jump)
1143 != UNKNOWN))
1144 reversep = 1;
1145 else
1146 return FALSE;
1147
1148 start_sequence ();
1149
1150 target = noce_emit_store_flag (if_info, if_info->x, reversep, 0);
1151 if (target)
1152 {
1153 if (target != if_info->x)
1154 noce_emit_move_insn (if_info->x, target);
1155
1156 seq = end_ifcvt_sequence (if_info);
1157 if (! seq)
1158 return FALSE;
1159
1160 emit_insn_before_setloc (seq, if_info->jump,
1161 INSN_LOCATION (if_info->insn_a));
1162 return TRUE;
1163 }
1164 else
1165 {
1166 end_sequence ();
1167 return FALSE;
1168 }
1169 }
1170
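/* For example, on a target where STORE_FLAG_VALUE == 1,

     if (a < b) x = 1; else x = 0;

   can collapse to a single store-flag operation

     x = (a < b);

   with no branch at all.  */
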
1171 /* Convert "if (test) x = a; else x = b", for A and B constant.
1172 Also allow A = y + c1, B = y + c2, with a common y between A
1173 and B. */
1174
1175 static int
1176 noce_try_store_flag_constants (struct noce_if_info *if_info)
1177 {
1178 rtx target;
1179 rtx_insn *seq;
1180 bool reversep;
1181 HOST_WIDE_INT itrue, ifalse, diff, tmp;
1182 int normalize;
1183 bool can_reverse;
1184 machine_mode mode = GET_MODE (if_info->x);
1185 rtx common = NULL_RTX;
1186
1187 rtx a = if_info->a;
1188 rtx b = if_info->b;
1189
1190 /* Handle cases like x := test ? y + 3 : y + 4. */
1191 if (GET_CODE (a) == PLUS
1192 && GET_CODE (b) == PLUS
1193 && CONST_INT_P (XEXP (a, 1))
1194 && CONST_INT_P (XEXP (b, 1))
1195 && rtx_equal_p (XEXP (a, 0), XEXP (b, 0))
1196 && noce_operand_ok (XEXP (a, 0))
1197 && if_info->branch_cost >= 2)
1198 {
1199 common = XEXP (a, 0);
1200 a = XEXP (a, 1);
1201 b = XEXP (b, 1);
1202 }
1203
1204 if (!noce_simple_bbs (if_info))
1205 return FALSE;
1206
1207 if (CONST_INT_P (a)
1208 && CONST_INT_P (b))
1209 {
1210 ifalse = INTVAL (a);
1211 itrue = INTVAL (b);
1212 bool subtract_flag_p = false;
1213
1214 diff = (unsigned HOST_WIDE_INT) itrue - ifalse;
1215 /* Make sure we can represent the difference between the two values. */
1216 if ((diff > 0)
1217 != ((ifalse < 0) != (itrue < 0) ? ifalse < 0 : ifalse < itrue))
1218 return FALSE;
1219
1220 diff = trunc_int_for_mode (diff, mode);
1221
1222 can_reverse = (reversed_comparison_code (if_info->cond, if_info->jump)
1223 != UNKNOWN);
1224
1225 reversep = false;
1226 if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
1227 {
1228 normalize = 0;
1229 /* We could collapse these cases but it is easier to follow the
1230 diff/STORE_FLAG_VALUE combinations when they are listed
1231 explicitly. */
1232
1233 /* test ? 3 : 4
1234 => 4 + (test != 0). */
1235 if (diff < 0 && STORE_FLAG_VALUE < 0)
1236 reversep = false;
1237 /* test ? 4 : 3
1238 => can_reverse | 4 + (test == 0)
1239 !can_reverse | 3 - (test != 0). */
1240 else if (diff > 0 && STORE_FLAG_VALUE < 0)
1241 {
1242 reversep = can_reverse;
1243 subtract_flag_p = !can_reverse;
1244 /* If we need to subtract the flag and we have PLUS-immediate
1245 A and B then it is unlikely to be beneficial to play tricks
1246 here. */
1247 if (subtract_flag_p && common)
1248 return FALSE;
1249 }
1250 /* test ? 3 : 4
1251 => can_reverse | 3 + (test == 0)
1252 !can_reverse | 4 - (test != 0). */
1253 else if (diff < 0 && STORE_FLAG_VALUE > 0)
1254 {
1255 reversep = can_reverse;
1256 subtract_flag_p = !can_reverse;
1257 /* If we need to subtract the flag and we have PLUS-immediate
1258 A and B then it is unlikely to be beneficial to play tricks
1259 here. */
1260 if (subtract_flag_p && common)
1261 return FALSE;
1262 }
1263 /* test ? 4 : 3
1264 => 4 + (test != 0). */
1265 else if (diff > 0 && STORE_FLAG_VALUE > 0)
1266 reversep = false;
1267 else
1268 gcc_unreachable ();
1269 }
1270 else if (ifalse == 0 && exact_log2 (itrue) >= 0
1271 && (STORE_FLAG_VALUE == 1
1272 || if_info->branch_cost >= 2))
1273 normalize = 1;
1274 else if (itrue == 0 && exact_log2 (ifalse) >= 0 && can_reverse
1275 && (STORE_FLAG_VALUE == 1 || if_info->branch_cost >= 2))
1276 {
1277 normalize = 1;
1278 reversep = true;
1279 }
1280 else if (itrue == -1
1281 && (STORE_FLAG_VALUE == -1
1282 || if_info->branch_cost >= 2))
1283 normalize = -1;
1284 else if (ifalse == -1 && can_reverse
1285 && (STORE_FLAG_VALUE == -1 || if_info->branch_cost >= 2))
1286 {
1287 normalize = -1;
1288 reversep = true;
1289 }
1290 else
1291 return FALSE;
1292
1293 if (reversep)
1294 {
1295 std::swap (itrue, ifalse);
1296 diff = trunc_int_for_mode (-(unsigned HOST_WIDE_INT) diff, mode);
1297 }
1298
1299 start_sequence ();
1300
1301 /* If we have x := test ? x + 3 : x + 4 then move the original
1302 x out of the way while we store flags. */
1303 if (common && rtx_equal_p (common, if_info->x))
1304 {
1305 common = gen_reg_rtx (mode);
1306 noce_emit_move_insn (common, if_info->x);
1307 }
1308
1309 target = noce_emit_store_flag (if_info, if_info->x, reversep, normalize);
1310 if (! target)
1311 {
1312 end_sequence ();
1313 return FALSE;
1314 }
1315
1316 /* if (test) x = 3; else x = 4;
1317 => x = 3 + (test == 0); */
1318 if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
1319 {
1320 /* Add the common part now. This may allow combine to merge this
1321 with the store flag operation earlier into some sort of conditional
1322 increment/decrement if the target allows it. */
1323 if (common)
1324 target = expand_simple_binop (mode, PLUS,
1325 target, common,
1326 target, 0, OPTAB_WIDEN);
1327
1328 /* Always use ifalse here. It should have been swapped with itrue
1329 when appropriate when reversep is true. */
1330 target = expand_simple_binop (mode, subtract_flag_p ? MINUS : PLUS,
1331 gen_int_mode (ifalse, mode), target,
1332 if_info->x, 0, OPTAB_WIDEN);
1333 }
1334 /* Other cases are not beneficial when the original A and B are PLUS
1335 expressions. */
1336 else if (common)
1337 {
1338 end_sequence ();
1339 return FALSE;
1340 }
1341 /* if (test) x = 8; else x = 0;
1342 => x = (test != 0) << 3; */
1343 else if (ifalse == 0 && (tmp = exact_log2 (itrue)) >= 0)
1344 {
1345 target = expand_simple_binop (mode, ASHIFT,
1346 target, GEN_INT (tmp), if_info->x, 0,
1347 OPTAB_WIDEN);
1348 }
1349
1350 /* if (test) x = -1; else x = b;
1351 => x = -(test != 0) | b; */
1352 else if (itrue == -1)
1353 {
1354 target = expand_simple_binop (mode, IOR,
1355 target, gen_int_mode (ifalse, mode),
1356 if_info->x, 0, OPTAB_WIDEN);
1357 }
1358 else
1359 {
1360 end_sequence ();
1361 return FALSE;
1362 }
1363
1364 if (! target)
1365 {
1366 end_sequence ();
1367 return FALSE;
1368 }
1369
1370 if (target != if_info->x)
1371 noce_emit_move_insn (if_info->x, target);
1372
1373 seq = end_ifcvt_sequence (if_info);
1374 if (!seq)
1375 return FALSE;
1376
1377 emit_insn_before_setloc (seq, if_info->jump,
1378 INSN_LOCATION (if_info->insn_a));
1379 return TRUE;
1380 }
1381
1382 return FALSE;
1383 }
1384
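/* A worked example of the diff == -STORE_FLAG_VALUE path above,
   assuming STORE_FLAG_VALUE == 1 and a reversible condition:

     if (test) x = 3; else x = 4;

   has diff == -1, so after reversing the condition we emit

     x = (test == 0);
     x = 3 + x;

   which yields 3 when TEST holds and 4 otherwise.  */
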
1385 /* Convert "if (test) foo++" into "foo += (test != 0)", and
1386 similarly for "foo--". */
1387
1388 static int
1389 noce_try_addcc (struct noce_if_info *if_info)
1390 {
1391 rtx target;
1392 rtx_insn *seq;
1393 int subtract, normalize;
1394
1395 if (!noce_simple_bbs (if_info))
1396 return FALSE;
1397
1398 if (GET_CODE (if_info->a) == PLUS
1399 && rtx_equal_p (XEXP (if_info->a, 0), if_info->b)
1400 && (reversed_comparison_code (if_info->cond, if_info->jump)
1401 != UNKNOWN))
1402 {
1403 rtx cond = if_info->cond;
1404 enum rtx_code code = reversed_comparison_code (cond, if_info->jump);
1405
1406 /* First try to use addcc pattern. */
1407 if (general_operand (XEXP (cond, 0), VOIDmode)
1408 && general_operand (XEXP (cond, 1), VOIDmode))
1409 {
1410 start_sequence ();
1411 target = emit_conditional_add (if_info->x, code,
1412 XEXP (cond, 0),
1413 XEXP (cond, 1),
1414 VOIDmode,
1415 if_info->b,
1416 XEXP (if_info->a, 1),
1417 GET_MODE (if_info->x),
1418 (code == LTU || code == GEU
1419 || code == LEU || code == GTU));
1420 if (target)
1421 {
1422 if (target != if_info->x)
1423 noce_emit_move_insn (if_info->x, target);
1424
1425 seq = end_ifcvt_sequence (if_info);
1426 if (!seq)
1427 return FALSE;
1428
1429 emit_insn_before_setloc (seq, if_info->jump,
1430 INSN_LOCATION (if_info->insn_a));
1431 return TRUE;
1432 }
1433 end_sequence ();
1434 }
1435
1436 /* If that fails, construct conditional increment or decrement using
1437 setcc. */
1438 if (if_info->branch_cost >= 2
1439 && (XEXP (if_info->a, 1) == const1_rtx
1440 || XEXP (if_info->a, 1) == constm1_rtx))
1441 {
1442 start_sequence ();
1443 if (STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
1444 subtract = 0, normalize = 0;
1445 else if (-STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
1446 subtract = 1, normalize = 0;
1447 else
1448 subtract = 0, normalize = INTVAL (XEXP (if_info->a, 1));
1449
1450
1451 target = noce_emit_store_flag (if_info,
1452 gen_reg_rtx (GET_MODE (if_info->x)),
1453 1, normalize);
1454
1455 if (target)
1456 target = expand_simple_binop (GET_MODE (if_info->x),
1457 subtract ? MINUS : PLUS,
1458 if_info->b, target, if_info->x,
1459 0, OPTAB_WIDEN);
1460 if (target)
1461 {
1462 if (target != if_info->x)
1463 noce_emit_move_insn (if_info->x, target);
1464
1465 seq = end_ifcvt_sequence (if_info);
1466 if (!seq)
1467 return FALSE;
1468
1469 emit_insn_before_setloc (seq, if_info->jump,
1470 INSN_LOCATION (if_info->insn_a));
1471 return TRUE;
1472 }
1473 end_sequence ();
1474 }
1475 }
1476
1477 return FALSE;
1478 }
1479
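/* For example, with STORE_FLAG_VALUE == 1,

     if (a < b) foo++;

   can become either a single conditional-add insn via the addcc
   pattern or, failing that,

     t = (a < b);
     foo = foo + t;

   either way removing the branch.  */
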
1480 /* Convert "if (test) x = 0;" to "x &= -(test == 0);" */
1481
1482 static int
1483 noce_try_store_flag_mask (struct noce_if_info *if_info)
1484 {
1485 rtx target;
1486 rtx_insn *seq;
1487 int reversep;
1488
1489 if (!noce_simple_bbs (if_info))
1490 return FALSE;
1491
1492 reversep = 0;
1493 if ((if_info->branch_cost >= 2
1494 || STORE_FLAG_VALUE == -1)
1495 && ((if_info->a == const0_rtx
1496 && rtx_equal_p (if_info->b, if_info->x))
1497 || ((reversep = (reversed_comparison_code (if_info->cond,
1498 if_info->jump)
1499 != UNKNOWN))
1500 && if_info->b == const0_rtx
1501 && rtx_equal_p (if_info->a, if_info->x))))
1502 {
1503 start_sequence ();
1504 target = noce_emit_store_flag (if_info,
1505 gen_reg_rtx (GET_MODE (if_info->x)),
1506 reversep, -1);
1507 if (target)
1508 target = expand_simple_binop (GET_MODE (if_info->x), AND,
1509 if_info->x,
1510 target, if_info->x, 0,
1511 OPTAB_WIDEN);
1512
1513 if (target)
1514 {
1515 int old_cost, new_cost, insn_cost;
1516 int speed_p;
1517
1518 if (target != if_info->x)
1519 noce_emit_move_insn (if_info->x, target);
1520
1521 seq = end_ifcvt_sequence (if_info);
1522 if (!seq)
1523 return FALSE;
1524
1525 speed_p = optimize_bb_for_speed_p (BLOCK_FOR_INSN (if_info->insn_a));
1526 insn_cost = insn_rtx_cost (PATTERN (if_info->insn_a), speed_p);
1527 old_cost = COSTS_N_INSNS (if_info->branch_cost) + insn_cost;
1528 new_cost = seq_cost (seq, speed_p);
1529
1530 if (new_cost > old_cost)
1531 return FALSE;
1532
1533 emit_insn_before_setloc (seq, if_info->jump,
1534 INSN_LOCATION (if_info->insn_a));
1535 return TRUE;
1536 }
1537
1538 end_sequence ();
1539 }
1540
1541 return FALSE;
1542 }
1543
1544 /* Helper function for noce_try_cmove and noce_try_cmove_arith. */
1545
1546 static rtx
1547 noce_emit_cmove (struct noce_if_info *if_info, rtx x, enum rtx_code code,
1548 rtx cmp_a, rtx cmp_b, rtx vfalse, rtx vtrue)
1549 {
1550 rtx target ATTRIBUTE_UNUSED;
1551 int unsignedp ATTRIBUTE_UNUSED;
1552
1553 /* If earliest == jump, try to build the cmove insn directly.
1554 This is helpful when combine has created some complex condition
1555 (like for alpha's cmovlbs) that we can't hope to regenerate
1556 through the normal interface. */
1557
1558 if (if_info->cond_earliest == if_info->jump)
1559 {
1560 rtx cond = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
1561 rtx if_then_else = gen_rtx_IF_THEN_ELSE (GET_MODE (x),
1562 cond, vtrue, vfalse);
1563 rtx set = gen_rtx_SET (x, if_then_else);
1564
1565 start_sequence ();
1566 rtx_insn *insn = emit_insn (set);
1567
1568 if (recog_memoized (insn) >= 0)
1569 {
1570 rtx_insn *seq = get_insns ();
1571 end_sequence ();
1572 emit_insn (seq);
1573
1574 return x;
1575 }
1576
1577 end_sequence ();
1578 }
1579
1580 /* Don't even try if the comparison operands are weird, unless
1581 the target supports cbranchcc4. */
1582 if (! general_operand (cmp_a, GET_MODE (cmp_a))
1583 || ! general_operand (cmp_b, GET_MODE (cmp_b)))
1584 {
1585 if (!have_cbranchcc4
1586 || GET_MODE_CLASS (GET_MODE (cmp_a)) != MODE_CC
1587 || cmp_b != const0_rtx)
1588 return NULL_RTX;
1589 }
1590
1591 unsignedp = (code == LTU || code == GEU
1592 || code == LEU || code == GTU);
1593
1594 target = emit_conditional_move (x, code, cmp_a, cmp_b, VOIDmode,
1595 vtrue, vfalse, GET_MODE (x),
1596 unsignedp);
1597 if (target)
1598 return target;
1599
1600 /* We might be faced with a situation like:
1601
1602 x = (reg:M TARGET)
1603 vtrue = (subreg:M (reg:N VTRUE) BYTE)
1604 vfalse = (subreg:M (reg:N VFALSE) BYTE)
1605
1606 We can't do a conditional move in mode M, but it's possible that we
1607 could do a conditional move in mode N instead and take a subreg of
1608 the result.
1609
1610 If we can't create new pseudos, though, don't bother. */
1611 if (reload_completed)
1612 return NULL_RTX;
1613
1614 if (GET_CODE (vtrue) == SUBREG && GET_CODE (vfalse) == SUBREG)
1615 {
1616 rtx reg_vtrue = SUBREG_REG (vtrue);
1617 rtx reg_vfalse = SUBREG_REG (vfalse);
1618 unsigned int byte_vtrue = SUBREG_BYTE (vtrue);
1619 unsigned int byte_vfalse = SUBREG_BYTE (vfalse);
1620 rtx promoted_target;
1621
1622 if (GET_MODE (reg_vtrue) != GET_MODE (reg_vfalse)
1623 || byte_vtrue != byte_vfalse
1624 || (SUBREG_PROMOTED_VAR_P (vtrue)
1625 != SUBREG_PROMOTED_VAR_P (vfalse))
1626 || (SUBREG_PROMOTED_GET (vtrue)
1627 != SUBREG_PROMOTED_GET (vfalse)))
1628 return NULL_RTX;
1629
1630 promoted_target = gen_reg_rtx (GET_MODE (reg_vtrue));
1631
1632 target = emit_conditional_move (promoted_target, code, cmp_a, cmp_b,
1633 VOIDmode, reg_vtrue, reg_vfalse,
1634 GET_MODE (reg_vtrue), unsignedp);
1635 /* Nope, couldn't do it in that mode either. */
1636 if (!target)
1637 return NULL_RTX;
1638
1639 target = gen_rtx_SUBREG (GET_MODE (vtrue), promoted_target, byte_vtrue);
1640 SUBREG_PROMOTED_VAR_P (target) = SUBREG_PROMOTED_VAR_P (vtrue);
1641 SUBREG_PROMOTED_SET (target, SUBREG_PROMOTED_GET (vtrue));
1642 emit_move_insn (x, target);
1643 return x;
1644 }
1645 else
1646 return NULL_RTX;
1647 }
1648
1649 /* Try only simple constants and registers here. More complex cases
1650 are handled in noce_try_cmove_arith after the noce_try_store_flag*
1651 transformations have had a go at it. */
1652
1653 static int
1654 noce_try_cmove (struct noce_if_info *if_info)
1655 {
1656 enum rtx_code code;
1657 rtx target;
1658 rtx_insn *seq;
1659
1660 if (!noce_simple_bbs (if_info))
1661 return FALSE;
1662
1663 if ((CONSTANT_P (if_info->a) || register_operand (if_info->a, VOIDmode))
1664 && (CONSTANT_P (if_info->b) || register_operand (if_info->b, VOIDmode)))
1665 {
1666 start_sequence ();
1667
1668 code = GET_CODE (if_info->cond);
1669 target = noce_emit_cmove (if_info, if_info->x, code,
1670 XEXP (if_info->cond, 0),
1671 XEXP (if_info->cond, 1),
1672 if_info->a, if_info->b);
1673
1674 if (target)
1675 {
1676 if (target != if_info->x)
1677 noce_emit_move_insn (if_info->x, target);
1678
1679 seq = end_ifcvt_sequence (if_info);
1680 if (!seq)
1681 return FALSE;
1682
1683 emit_insn_before_setloc (seq, if_info->jump,
1684 INSN_LOCATION (if_info->insn_a));
1685 return TRUE;
1686 }
1687 /* If both a and b are constants try a last-ditch transformation:
1688 if (test) x = a; else x = b;
1689 => x = (-(test != 0) & (b - a)) + a;
1690 Try this only if the target-specific expansion above has failed.
1691 The target-specific expander may want to generate sequences that
1692 we don't know about, so give them a chance before trying this
1693 approach. */
1694 else if (!targetm.have_conditional_execution ()
1695 && CONST_INT_P (if_info->a) && CONST_INT_P (if_info->b)
1696 && ((if_info->branch_cost >= 2 && STORE_FLAG_VALUE == -1)
1697 || if_info->branch_cost >= 3))
1698 {
1699 machine_mode mode = GET_MODE (if_info->x);
1700 HOST_WIDE_INT ifalse = INTVAL (if_info->a);
1701 HOST_WIDE_INT itrue = INTVAL (if_info->b);
1702 rtx target = noce_emit_store_flag (if_info, if_info->x, false, -1);
1703 if (!target)
1704 {
1705 end_sequence ();
1706 return FALSE;
1707 }
1708
1709 HOST_WIDE_INT diff = (unsigned HOST_WIDE_INT) itrue - ifalse;
1710 /* Make sure we can represent the difference
1711 between the two values. */
1712 if ((diff > 0)
1713 != ((ifalse < 0) != (itrue < 0) ? ifalse < 0 : ifalse < itrue))
1714 {
1715 end_sequence ();
1716 return FALSE;
1717 }
1718
1719 diff = trunc_int_for_mode (diff, mode);
1720 target = expand_simple_binop (mode, AND,
1721 target, gen_int_mode (diff, mode),
1722 if_info->x, 0, OPTAB_WIDEN);
1723 if (target)
1724 target = expand_simple_binop (mode, PLUS,
1725 target, gen_int_mode (ifalse, mode),
1726 if_info->x, 0, OPTAB_WIDEN);
1727 if (target)
1728 {
1729 if (target != if_info->x)
1730 noce_emit_move_insn (if_info->x, target);
1731
1732 seq = end_ifcvt_sequence (if_info);
1733 if (!seq)
1734 return FALSE;
1735
1736 emit_insn_before_setloc (seq, if_info->jump,
1737 INSN_LOCATION (if_info->insn_a));
1738 return TRUE;
1739 }
1740 else
1741 {
1742 end_sequence ();
1743 return FALSE;
1744 }
1745 }
1746 else
1747 end_sequence ();
1748 }
1749
1750 return FALSE;
1751 }
1752
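/* Plugging concrete values into the last-ditch formula above,
   a == 3 and b == 7 give

     x = (-(test != 0) & 4) + 3;

   i.e. x becomes 7 when the flag is set and 3 when it is clear,
   trading the branch for an AND and a PLUS.  */
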
1753 /* Return true if X contains a condition code mode rtx. */
1754
1755 static bool
1756 contains_ccmode_rtx_p (rtx x)
1757 {
1758 subrtx_iterator::array_type array;
1759 FOR_EACH_SUBRTX (iter, array, x, ALL)
1760 if (GET_MODE_CLASS (GET_MODE (*iter)) == MODE_CC)
1761 return true;
1762
1763 return false;
1764 }
1765
1766 /* Helper for bb_valid_for_noce_process_p. Validate that
1767 the rtx insn INSN is a single set that does not set
1768 the condition code register CC and is in general valid for
1769 if-conversion. */
1770
1771 static bool
1772 insn_valid_noce_process_p (rtx_insn *insn, rtx cc)
1773 {
1774 if (!insn
1775 || !NONJUMP_INSN_P (insn)
1776 || (cc && set_of (cc, insn)))
1777 return false;
1778
1779 rtx sset = single_set (insn);
1780
1781 /* Currently support only simple single sets in test_bb. */
1782 if (!sset
1783 || !noce_operand_ok (SET_DEST (sset))
1784 || contains_ccmode_rtx_p (SET_DEST (sset))
1785 || !noce_operand_ok (SET_SRC (sset)))
1786 return false;
1787
1788 return true;
1789 }
1790
1791
1792 /* Return true iff the registers that the insns in BB_A set do not
1793 get used in BB_B. */
1794
1795 static bool
1796 bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b)
1797 {
1798 rtx_insn *a_insn;
1799 bitmap bba_sets = BITMAP_ALLOC (&reg_obstack);
1800
1801 df_ref def;
1802 df_ref use;
1803
1804 FOR_BB_INSNS (bb_a, a_insn)
1805 {
1806 if (!active_insn_p (a_insn))
1807 continue;
1808
1809 rtx sset_a = single_set (a_insn);
1810
1811 if (!sset_a)
1812 {
1813 BITMAP_FREE (bba_sets);
1814 return false;
1815 }
1816
1817 /* Record all registers that BB_A sets. */
1818 FOR_EACH_INSN_DEF (def, a_insn)
1819 bitmap_set_bit (bba_sets, DF_REF_REGNO (def));
1820 }
1821
1822 rtx_insn *b_insn;
1823
1824 FOR_BB_INSNS (bb_b, b_insn)
1825 {
1826 if (!active_insn_p (b_insn))
1827 continue;
1828
1829 rtx sset_b = single_set (b_insn);
1830
1831 if (!sset_b)
1832 {
1833 BITMAP_FREE (bba_sets);
1834 return false;
1835 }
1836
1837 /* Make sure this is a REG and not some instance
1838 of ZERO_EXTRACT or SUBREG or other dangerous stuff. */
1839 if (!REG_P (SET_DEST (sset_b)))
1840 {
1841 BITMAP_FREE (bba_sets);
1842 return false;
1843 }
1844
1845 /* If the insn uses a reg set in BB_A return false. */
1846 FOR_EACH_INSN_USE (use, b_insn)
1847 {
1848 if (bitmap_bit_p (bba_sets, DF_REF_REGNO (use)))
1849 {
1850 BITMAP_FREE (bba_sets);
1851 return false;
1852 }
1853 }
1854
1855 }
1856
1857 BITMAP_FREE (bba_sets);
1858 return true;
1859 }
1860
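/* For example, if BB_A contains "r4 = r1 + r2" and BB_B contains
   "r5 = r4 - r3", flattening both blocks into straight-line code
   would let BB_B read the speculated value of r4, so the conversion
   must be rejected.  */
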
1861 /* Emit copies of all the active instructions in BB except the last.
1862 This is a helper for noce_try_cmove_arith. */
1863
1864 static void
1865 noce_emit_all_but_last (basic_block bb)
1866 {
1867 rtx_insn *last = last_active_insn (bb, FALSE);
1868 rtx_insn *insn;
1869 FOR_BB_INSNS (bb, insn)
1870 {
1871 if (insn != last && active_insn_p (insn))
1872 {
1873 rtx_insn *to_emit = as_a <rtx_insn *> (copy_rtx (insn));
1874
1875 emit_insn (PATTERN (to_emit));
1876 }
1877 }
1878 }
1879
1880 /* Helper for noce_try_cmove_arith. Emit the pattern TO_EMIT and return
1881 the resulting insn or NULL if it's not a valid insn. */
1882
1883 static rtx_insn *
1884 noce_emit_insn (rtx to_emit)
1885 {
1886 gcc_assert (to_emit);
1887 rtx_insn *insn = emit_insn (to_emit);
1888
1889 if (recog_memoized (insn) < 0)
1890 return NULL;
1891
1892 return insn;
1893 }
1894
1895 /* Helper for noce_try_cmove_arith. Emit a copy of the insns up to
1896 and including the penultimate one in BB if it is not simple
1897 (as indicated by SIMPLE). Then emit LAST_INSN as the last
1898 insn in the block. The reason for that is that LAST_INSN may
1899 have been modified by the preparation in noce_try_cmove_arith. */
1900
1901 static bool
1902 noce_emit_bb (rtx last_insn, basic_block bb, bool simple)
1903 {
1904 if (bb && !simple)
1905 noce_emit_all_but_last (bb);
1906
1907 if (last_insn && !noce_emit_insn (last_insn))
1908 return false;
1909
1910 return true;
1911 }
1912
1913 /* Try more complex cases involving conditional_move. */
1914
1915 static int
1916 noce_try_cmove_arith (struct noce_if_info *if_info)
1917 {
1918 rtx a = if_info->a;
1919 rtx b = if_info->b;
1920 rtx x = if_info->x;
1921 rtx orig_a, orig_b;
1922 rtx_insn *insn_a, *insn_b;
1923 bool a_simple = if_info->then_simple;
1924 bool b_simple = if_info->else_simple;
1925 basic_block then_bb = if_info->then_bb;
1926 basic_block else_bb = if_info->else_bb;
1927 rtx target;
1928 int is_mem = 0;
1929 enum rtx_code code;
1930 rtx_insn *ifcvt_seq;
1931
1932 /* A conditional move from two memory sources is equivalent to a
1933 conditional on their addresses followed by a load. Don't do this
1934 early because it'll screw alias analysis. Note that we've
1935 already checked for no side effects. */
1936 /* ??? FIXME: Magic number 5. */
1937 if (cse_not_expected
1938 && MEM_P (a) && MEM_P (b)
1939 && MEM_ADDR_SPACE (a) == MEM_ADDR_SPACE (b)
1940 && if_info->branch_cost >= 5)
1941 {
1942 machine_mode address_mode = get_address_mode (a);
1943
1944 a = XEXP (a, 0);
1945 b = XEXP (b, 0);
1946 x = gen_reg_rtx (address_mode);
1947 is_mem = 1;
1948 }
1949
1950 /* ??? We could handle this if we knew that a load from A or B could
1951 not trap or fault. This is also true if we've already loaded
1952 from the address along the path from ENTRY. */
1953 else if (may_trap_or_fault_p (a) || may_trap_or_fault_p (b))
1954 return FALSE;
1955
1956 /* if (test) x = a + b; else x = c - d;
1957    => y = a + b;
1958       x = c - d;
1959       if (test)
1960         x = y;
1961 */
1962
1963 code = GET_CODE (if_info->cond);
1964 insn_a = if_info->insn_a;
1965 insn_b = if_info->insn_b;
1966
1967 machine_mode x_mode = GET_MODE (x);
1968
1969 if (!can_conditionally_move_p (x_mode))
1970 return FALSE;
1971
1972 unsigned int then_cost;
1973 unsigned int else_cost;
1974 if (insn_a)
1975 then_cost = if_info->then_cost;
1976 else
1977 then_cost = 0;
1978
1979 if (insn_b)
1980 else_cost = if_info->else_cost;
1981 else
1982 else_cost = 0;
1983
1984 /* We're going to execute one of the basic blocks anyway, so
1985 bail out if the most expensive of the two blocks is unacceptable. */
1986 if (MAX (then_cost, else_cost) > COSTS_N_INSNS (if_info->branch_cost))
1987 return FALSE;
1988
1989 /* Possibly rearrange operands to make things come out more natural. */
1990 if (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN)
1991 {
1992 int reversep = 0;
1993 if (rtx_equal_p (b, x))
1994 reversep = 1;
1995 else if (general_operand (b, GET_MODE (b)))
1996 reversep = 1;
1997
1998 if (reversep)
1999 {
2000 code = reversed_comparison_code (if_info->cond, if_info->jump);
2001 std::swap (a, b);
2002 std::swap (insn_a, insn_b);
2003 std::swap (a_simple, b_simple);
2004 std::swap (then_bb, else_bb);
2005 }
2006 }
2007
2008 if (then_bb && else_bb && !a_simple && !b_simple
2009 && (!bbs_ok_for_cmove_arith (then_bb, else_bb)
2010 || !bbs_ok_for_cmove_arith (else_bb, then_bb)))
2011 return FALSE;
2012
2013 start_sequence ();
2014
2015 /* If one of the blocks is empty then the corresponding B or A value
2016 came from the test block. The non-empty complex block that we will
2017 emit might clobber the register used by B or A, so move it to a pseudo
2018 first. */
2019
2020 if (b_simple || !else_bb)
2021 {
2022 rtx tmp_b = gen_reg_rtx (x_mode);
2023 /* Perform the simplest kind of set. The register allocator
2024 should remove it if it's not actually needed. If this set is not
2025 a valid insn (can happen on the is_mem path) then end_ifcvt_sequence
2026 will cancel the whole sequence. Don't try any of the fallback paths
2027 from noce_emit_move_insn since we want this to be the simplest kind
2028 of move. */
2029 emit_insn (gen_rtx_SET (tmp_b, b));
2030 b = tmp_b;
2031 }
2032
2033 if (a_simple || !then_bb)
2034 {
2035 rtx tmp_a = gen_reg_rtx (x_mode);
2036 emit_insn (gen_rtx_SET (tmp_a, a));
2037 a = tmp_a;
2038 }
2039
2040 orig_a = a;
2041 orig_b = b;
2042
2043 rtx emit_a = NULL_RTX;
2044 rtx emit_b = NULL_RTX;
2045
2046 /* If either operand is complex, load it into a register first.
2047 The best way to do this is to copy the original insn. In this
2048 way we preserve any clobbers etc. that the insn may have had.
2049 This is of course not possible in the IS_MEM case. */
2050
2051 if (! general_operand (a, GET_MODE (a)))
2052 {
2053
2054 if (is_mem)
2055 {
2056 rtx reg = gen_reg_rtx (GET_MODE (a));
2057 emit_a = gen_rtx_SET (reg, a);
2058 }
2059 else if (! insn_a)
2060 goto end_seq_and_fail;
2061 else
2062 {
2063 a = gen_reg_rtx (GET_MODE (a));
2064 rtx_insn *copy_of_a = as_a <rtx_insn *> (copy_rtx (insn_a));
2065 rtx set = single_set (copy_of_a);
2066 SET_DEST (set) = a;
2067
2068 emit_a = PATTERN (copy_of_a);
2069 }
2070 }
2071
2072 if (! general_operand (b, GET_MODE (b)))
2073 {
2074 if (is_mem)
2075 {
2076 rtx reg = gen_reg_rtx (GET_MODE (b));
2077 emit_b = gen_rtx_SET (reg, b);
2078 }
2079 else if (! insn_b)
2080 goto end_seq_and_fail;
2081 else
2082 {
2083 b = gen_reg_rtx (GET_MODE (b));
2084 rtx_insn *copy_of_b = as_a <rtx_insn *> (copy_rtx (insn_b));
2085 rtx set = single_set (copy_of_b);
2086
2087 SET_DEST (set) = b;
2088 emit_b = PATTERN (copy_of_b);
2089 }
2090 }
2091
2092 /* If the insn to set up A clobbers any registers B depends on, try to
2093 swap the insn that sets up A with the one that sets up B. If even
2094 that doesn't help, punt. */
2095
2096 if (emit_a && modified_in_p (orig_b, emit_a))
2097 {
2098 if (modified_in_p (orig_a, emit_b))
2099 goto end_seq_and_fail;
2100
2101 if (else_bb && !b_simple)
2102 {
2103 if (!noce_emit_bb (emit_b, else_bb, b_simple))
2104 goto end_seq_and_fail;
2105 }
2106
2107 if (!noce_emit_bb (emit_a, then_bb, a_simple))
2108 goto end_seq_and_fail;
2109 }
2110 else
2111 {
2112 if (!noce_emit_bb (emit_a, then_bb, a_simple))
2113 goto end_seq_and_fail;
2114
2115 if (!noce_emit_bb (emit_b, else_bb, b_simple))
2116 goto end_seq_and_fail;
2117
2118 }
2119
2120 target = noce_emit_cmove (if_info, x, code, XEXP (if_info->cond, 0),
2121 XEXP (if_info->cond, 1), a, b);
2122
2123 if (! target)
2124 goto end_seq_and_fail;
2125
2126 /* If we're handling the memory case from above, emit the load now. */
2127 if (is_mem)
2128 {
2129 rtx mem = gen_rtx_MEM (GET_MODE (if_info->x), target);
2130
2131 /* Copy over flags as appropriate. */
2132 if (MEM_VOLATILE_P (if_info->a) || MEM_VOLATILE_P (if_info->b))
2133 MEM_VOLATILE_P (mem) = 1;
2134 if (MEM_ALIAS_SET (if_info->a) == MEM_ALIAS_SET (if_info->b))
2135 set_mem_alias_set (mem, MEM_ALIAS_SET (if_info->a));
2136 set_mem_align (mem,
2137 MIN (MEM_ALIGN (if_info->a), MEM_ALIGN (if_info->b)));
2138
2139 gcc_assert (MEM_ADDR_SPACE (if_info->a) == MEM_ADDR_SPACE (if_info->b));
2140 set_mem_addr_space (mem, MEM_ADDR_SPACE (if_info->a));
2141
2142 noce_emit_move_insn (if_info->x, mem);
2143 }
2144 else if (target != x)
2145 noce_emit_move_insn (x, target);
2146
2147 ifcvt_seq = end_ifcvt_sequence (if_info);
2148 if (!ifcvt_seq)
2149 return FALSE;
2150
2151 emit_insn_before_setloc (ifcvt_seq, if_info->jump,
2152 INSN_LOCATION (if_info->insn_a));
2153 return TRUE;
2154
2155 end_seq_and_fail:
2156 end_sequence ();
2157 return FALSE;
2158 }
2159
2160 /* For most cases, the simplified condition we found is the best
2161 choice, but this is not the case for the min/max/abs transforms.
2162 For these we wish to see A or B itself mentioned in the condition.  */
2163
2164 static rtx
2165 noce_get_alt_condition (struct noce_if_info *if_info, rtx target,
2166 rtx_insn **earliest)
2167 {
2168 rtx cond, set;
2169 rtx_insn *insn;
2170 int reverse;
2171
2172 /* If target is already mentioned in the known condition, return it. */
2173 if (reg_mentioned_p (target, if_info->cond))
2174 {
2175 *earliest = if_info->cond_earliest;
2176 return if_info->cond;
2177 }
2178
2179 set = pc_set (if_info->jump);
2180 cond = XEXP (SET_SRC (set), 0);
2181 reverse
2182 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
2183 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (if_info->jump);
2184 if (if_info->then_else_reversed)
2185 reverse = !reverse;
2186
2187 /* If we're looking for a constant, try to make the conditional
2188 have that constant in it. There are two reasons why it may
2189 not have the constant we want:
2190
2191 1. GCC may have needed to put the constant in a register, because
2192 the target can't compare directly against that constant. For
2193 this case, we look for a SET immediately before the comparison
2194 that puts a constant in that register.
2195
2196 2. GCC may have canonicalized the conditional, for example
2197 replacing "if x < 4" with "if x <= 3". We can undo that (or
2198 make equivalent types of changes) to get the constants we need
2199 if they're off by one in the right direction. */
2200
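/* For instance (illustrative): if we want the constant 4 but the
   canonicalized condition reads (le x 3), the LE case below rewrites
   it as the equivalent (lt x 4), which mentions the constant we
   are after. */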
2201 if (CONST_INT_P (target))
2202 {
2203 enum rtx_code code = GET_CODE (if_info->cond);
2204 rtx op_a = XEXP (if_info->cond, 0);
2205 rtx op_b = XEXP (if_info->cond, 1);
2206 rtx_insn *prev_insn;
2207
2208 /* First, look to see if we put a constant in a register. */
2209 prev_insn = prev_nonnote_insn (if_info->cond_earliest);
2210 if (prev_insn
2211 && BLOCK_FOR_INSN (prev_insn)
2212 == BLOCK_FOR_INSN (if_info->cond_earliest)
2213 && INSN_P (prev_insn)
2214 && GET_CODE (PATTERN (prev_insn)) == SET)
2215 {
2216 rtx src = find_reg_equal_equiv_note (prev_insn);
2217 if (!src)
2218 src = SET_SRC (PATTERN (prev_insn));
2219 if (CONST_INT_P (src))
2220 {
2221 if (rtx_equal_p (op_a, SET_DEST (PATTERN (prev_insn))))
2222 op_a = src;
2223 else if (rtx_equal_p (op_b, SET_DEST (PATTERN (prev_insn))))
2224 op_b = src;
2225
2226 if (CONST_INT_P (op_a))
2227 {
2228 std::swap (op_a, op_b);
2229 code = swap_condition (code);
2230 }
2231 }
2232 }
2233
2234 /* Now, look to see if we can get the right constant by
2235 adjusting the conditional. */
2236 if (CONST_INT_P (op_b))
2237 {
2238 HOST_WIDE_INT desired_val = INTVAL (target);
2239 HOST_WIDE_INT actual_val = INTVAL (op_b);
2240
2241 switch (code)
2242 {
2243 case LT:
2244 if (actual_val == desired_val + 1)
2245 {
2246 code = LE;
2247 op_b = GEN_INT (desired_val);
2248 }
2249 break;
2250 case LE:
2251 if (actual_val == desired_val - 1)
2252 {
2253 code = LT;
2254 op_b = GEN_INT (desired_val);
2255 }
2256 break;
2257 case GT:
2258 if (actual_val == desired_val - 1)
2259 {
2260 code = GE;
2261 op_b = GEN_INT (desired_val);
2262 }
2263 break;
2264 case GE:
2265 if (actual_val == desired_val + 1)
2266 {
2267 code = GT;
2268 op_b = GEN_INT (desired_val);
2269 }
2270 break;
2271 default:
2272 break;
2273 }
2274 }
2275
2276 /* If we made any changes, generate a new conditional that is
2277 equivalent to what we started with, but has the right
2278 constants in it. */
2279 if (code != GET_CODE (if_info->cond)
2280 || op_a != XEXP (if_info->cond, 0)
2281 || op_b != XEXP (if_info->cond, 1))
2282 {
2283 cond = gen_rtx_fmt_ee (code, GET_MODE (cond), op_a, op_b);
2284 *earliest = if_info->cond_earliest;
2285 return cond;
2286 }
2287 }
2288
2289 cond = canonicalize_condition (if_info->jump, cond, reverse,
2290 earliest, target, have_cbranchcc4, true);
2291 if (! cond || ! reg_mentioned_p (target, cond))
2292 return NULL;
2293
2294 /* We almost certainly searched back to a different place.
2295 Need to re-verify correct lifetimes. */
2296
2297 /* X may not be mentioned in the range (cond_earliest, jump]. */
2298 for (insn = if_info->jump; insn != *earliest; insn = PREV_INSN (insn))
2299 if (INSN_P (insn) && reg_overlap_mentioned_p (if_info->x, PATTERN (insn)))
2300 return NULL;
2301
2302 /* A and B may not be modified in the range [cond_earliest, jump). */
2303 for (insn = *earliest; insn != if_info->jump; insn = NEXT_INSN (insn))
2304 if (INSN_P (insn)
2305 && (modified_in_p (if_info->a, insn)
2306 || modified_in_p (if_info->b, insn)))
2307 return NULL;
2308
2309 return cond;
2310 }
2311
2312 /* Convert "if (a < b) x = a; else x = b;" to "x = min(a, b);", etc. */
2313
2314 static int
2315 noce_try_minmax (struct noce_if_info *if_info)
2316 {
2317 rtx cond, target;
2318 rtx_insn *earliest, *seq;
2319 enum rtx_code code, op;
2320 int unsignedp;
2321
2322 if (!noce_simple_bbs (if_info))
2323 return FALSE;
2324
2325 /* ??? Reject modes with NaNs or signed zeros since we don't know how
2326 they will be resolved with an SMIN/SMAX. It wouldn't be too hard
2327 to get the target to tell us... */
2328 if (HONOR_SIGNED_ZEROS (if_info->x)
2329 || HONOR_NANS (if_info->x))
2330 return FALSE;
2331
2332 cond = noce_get_alt_condition (if_info, if_info->a, &earliest);
2333 if (!cond)
2334 return FALSE;
2335
2336 /* Verify the condition is of the form we expect, and canonicalize
2337 the comparison code. */
2338 code = GET_CODE (cond);
2339 if (rtx_equal_p (XEXP (cond, 0), if_info->a))
2340 {
2341 if (! rtx_equal_p (XEXP (cond, 1), if_info->b))
2342 return FALSE;
2343 }
2344 else if (rtx_equal_p (XEXP (cond, 1), if_info->a))
2345 {
2346 if (! rtx_equal_p (XEXP (cond, 0), if_info->b))
2347 return FALSE;
2348 code = swap_condition (code);
2349 }
2350 else
2351 return FALSE;
2352
2353 /* Determine what sort of operation this is. Note that the code is for
2354 a taken branch, so the code->operation mapping appears backwards. */
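/* For instance (illustrative): the canonical "if (a < b) x = a;
   else x = b;" computes x = min (a, b), but the branch that skips
   "x = a" is taken when a >= b, so it is the GE case below that
   selects SMIN. */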
2355 switch (code)
2356 {
2357 case LT:
2358 case LE:
2359 case UNLT:
2360 case UNLE:
2361 op = SMAX;
2362 unsignedp = 0;
2363 break;
2364 case GT:
2365 case GE:
2366 case UNGT:
2367 case UNGE:
2368 op = SMIN;
2369 unsignedp = 0;
2370 break;
2371 case LTU:
2372 case LEU:
2373 op = UMAX;
2374 unsignedp = 1;
2375 break;
2376 case GTU:
2377 case GEU:
2378 op = UMIN;
2379 unsignedp = 1;
2380 break;
2381 default:
2382 return FALSE;
2383 }
2384
2385 start_sequence ();
2386
2387 target = expand_simple_binop (GET_MODE (if_info->x), op,
2388 if_info->a, if_info->b,
2389 if_info->x, unsignedp, OPTAB_WIDEN);
2390 if (! target)
2391 {
2392 end_sequence ();
2393 return FALSE;
2394 }
2395 if (target != if_info->x)
2396 noce_emit_move_insn (if_info->x, target);
2397
2398 seq = end_ifcvt_sequence (if_info);
2399 if (!seq)
2400 return FALSE;
2401
2402 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2403 if_info->cond = cond;
2404 if_info->cond_earliest = earliest;
2405
2406 return TRUE;
2407 }
2408
2409 /* Convert "if (a < 0) x = -a; else x = a;" to "x = abs(a);",
2410 "if (a < 0) x = ~a; else x = a;" to "x = one_cmpl_abs(a);",
2411 etc. */
2412
2413 static int
2414 noce_try_abs (struct noce_if_info *if_info)
2415 {
2416 rtx cond, target, a, b, c;
2417 rtx_insn *earliest, *seq;
2418 int negate;
2419 bool one_cmpl = false;
2420
2421 if (!noce_simple_bbs (if_info))
2422 return FALSE;
2423
2424 /* Reject modes with signed zeros. */
2425 if (HONOR_SIGNED_ZEROS (if_info->x))
2426 return FALSE;
2427
2428 /* Recognize A and B as constituting an ABS or NABS. The canonical
2429 form is a branch around the negation, taken when the object is the
2430 first operand of a comparison against 0 that evaluates to true. */
2431 a = if_info->a;
2432 b = if_info->b;
2433 if (GET_CODE (a) == NEG && rtx_equal_p (XEXP (a, 0), b))
2434 negate = 0;
2435 else if (GET_CODE (b) == NEG && rtx_equal_p (XEXP (b, 0), a))
2436 {
2437 std::swap (a, b);
2438 negate = 1;
2439 }
2440 else if (GET_CODE (a) == NOT && rtx_equal_p (XEXP (a, 0), b))
2441 {
2442 negate = 0;
2443 one_cmpl = true;
2444 }
2445 else if (GET_CODE (b) == NOT && rtx_equal_p (XEXP (b, 0), a))
2446 {
2447 std::swap (a, b);
2448 negate = 1;
2449 one_cmpl = true;
2450 }
2451 else
2452 return FALSE;
2453
2454 cond = noce_get_alt_condition (if_info, b, &earliest);
2455 if (!cond)
2456 return FALSE;
2457
2458 /* Verify the condition is of the form we expect. */
2459 if (rtx_equal_p (XEXP (cond, 0), b))
2460 c = XEXP (cond, 1);
2461 else if (rtx_equal_p (XEXP (cond, 1), b))
2462 {
2463 c = XEXP (cond, 0);
2464 negate = !negate;
2465 }
2466 else
2467 return FALSE;
2468
2469 /* Verify that C is zero. Search one step backward for a
2470 REG_EQUAL note or a simple source if necessary. */
2471 if (REG_P (c))
2472 {
2473 rtx set;
2474 rtx_insn *insn = prev_nonnote_insn (earliest);
2475 if (insn
2476 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (earliest)
2477 && (set = single_set (insn))
2478 && rtx_equal_p (SET_DEST (set), c))
2479 {
2480 rtx note = find_reg_equal_equiv_note (insn);
2481 if (note)
2482 c = XEXP (note, 0);
2483 else
2484 c = SET_SRC (set);
2485 }
2486 else
2487 return FALSE;
2488 }
2489 if (MEM_P (c)
2490 && GET_CODE (XEXP (c, 0)) == SYMBOL_REF
2491 && CONSTANT_POOL_ADDRESS_P (XEXP (c, 0)))
2492 c = get_pool_constant (XEXP (c, 0));
2493
2494 /* Work around funny ideas get_condition has wrt canonicalization.
2495 Note that these rtx constants are known to be CONST_INT, and
2496 therefore imply integer comparisons. */
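/* E.g. for integers (gt b (const_int -1)) is equivalent to
   (ge b (const_int 0)), and (lt b (const_int 1)) to
   (le b (const_int 0)), so accept those two spellings of a
   comparison against zero as well. */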
2497 if (c == constm1_rtx && GET_CODE (cond) == GT)
2498 ;
2499 else if (c == const1_rtx && GET_CODE (cond) == LT)
2500 ;
2501 else if (c != CONST0_RTX (GET_MODE (b)))
2502 return FALSE;
2503
2504 /* Determine what sort of operation this is. */
2505 switch (GET_CODE (cond))
2506 {
2507 case LT:
2508 case LE:
2509 case UNLT:
2510 case UNLE:
2511 negate = !negate;
2512 break;
2513 case GT:
2514 case GE:
2515 case UNGT:
2516 case UNGE:
2517 break;
2518 default:
2519 return FALSE;
2520 }
2521
2522 start_sequence ();
2523 if (one_cmpl)
2524 target = expand_one_cmpl_abs_nojump (GET_MODE (if_info->x), b,
2525 if_info->x);
2526 else
2527 target = expand_abs_nojump (GET_MODE (if_info->x), b, if_info->x, 1);
2528
2529 /* ??? It's a quandary whether cmove would be better here, especially
2530 for integers. Perhaps combine will clean things up. */
2531 if (target && negate)
2532 {
2533 if (one_cmpl)
2534 target = expand_simple_unop (GET_MODE (target), NOT, target,
2535 if_info->x, 0);
2536 else
2537 target = expand_simple_unop (GET_MODE (target), NEG, target,
2538 if_info->x, 0);
2539 }
2540
2541 if (! target)
2542 {
2543 end_sequence ();
2544 return FALSE;
2545 }
2546
2547 if (target != if_info->x)
2548 noce_emit_move_insn (if_info->x, target);
2549
2550 seq = end_ifcvt_sequence (if_info);
2551 if (!seq)
2552 return FALSE;
2553
2554 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2555 if_info->cond = cond;
2556 if_info->cond_earliest = earliest;
2557
2558 return TRUE;
2559 }
2560
2561 /* Convert "if (m < 0) x = b; else x = 0;" to "x = (m >> C) & b;". */
2562
2563 static int
2564 noce_try_sign_mask (struct noce_if_info *if_info)
2565 {
2566 rtx cond, t, m, c;
2567 rtx_insn *seq;
2568 machine_mode mode;
2569 enum rtx_code code;
2570 bool t_unconditional;
2571
2572 if (!noce_simple_bbs (if_info))
2573 return FALSE;
2574
2575 cond = if_info->cond;
2576 code = GET_CODE (cond);
2577 m = XEXP (cond, 0);
2578 c = XEXP (cond, 1);
2579
2580 t = NULL_RTX;
2581 if (if_info->a == const0_rtx)
2582 {
2583 if ((code == LT && c == const0_rtx)
2584 || (code == LE && c == constm1_rtx))
2585 t = if_info->b;
2586 }
2587 else if (if_info->b == const0_rtx)
2588 {
2589 if ((code == GE && c == const0_rtx)
2590 || (code == GT && c == constm1_rtx))
2591 t = if_info->a;
2592 }
2593
2594 if (! t || side_effects_p (t))
2595 return FALSE;
2596
2597 /* We currently don't handle different modes. */
2598 mode = GET_MODE (t);
2599 if (GET_MODE (m) != mode)
2600 return FALSE;
2601
2602 /* This is only profitable if T is unconditionally executed/evaluated in the
2603 original insn sequence or T is cheap. The former happens if B is the
2604 non-zero (T) value and if INSN_B was taken from TEST_BB, or there was no
2605 INSN_B which can happen for e.g. conditional stores to memory. For the
2606 cost computation use the block TEST_BB where the evaluation will end up
2607 after the transformation. */
2608 t_unconditional =
2609 (t == if_info->b
2610 && (if_info->insn_b == NULL_RTX
2611 || BLOCK_FOR_INSN (if_info->insn_b) == if_info->test_bb));
2612 if (!(t_unconditional
2613 || (set_src_cost (t, mode, optimize_bb_for_speed_p (if_info->test_bb))
2614 < COSTS_N_INSNS (2))))
2615 return FALSE;
2616
2617 start_sequence ();
2618 /* Use emit_store_flag to generate "m < 0 ? -1 : 0" instead of expanding
2619 "(signed) m >> 31" directly. This benefits targets with specialized
2620 insns to obtain the signmask, but still uses ashr_optab otherwise. */
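/* E.g. (illustrative) for a 32-bit M this amounts to (m >> 31) & t:
   the mask is all ones when M is negative and zero otherwise, so the
   AND below yields T or 0 as required; the actual shift count is
   implied by MODE. */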
2621 m = emit_store_flag (gen_reg_rtx (mode), LT, m, const0_rtx, mode, 0, -1);
2622 t = m ? expand_binop (mode, and_optab, m, t, NULL_RTX, 0, OPTAB_DIRECT)
2623 : NULL_RTX;
2624
2625 if (!t)
2626 {
2627 end_sequence ();
2628 return FALSE;
2629 }
2630
2631 noce_emit_move_insn (if_info->x, t);
2632
2633 seq = end_ifcvt_sequence (if_info);
2634 if (!seq)
2635 return FALSE;
2636
2637 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2638 return TRUE;
2639 }
2640
2641
2642 /* Optimize away "if (x & C) x |= C" and similar bit manipulation
2643 transformations. */
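/* E.g. (illustrative) "if (x & 8) x |= 8;" is deleted outright,
   while "if (!(x & 8)) x |= 8;" becomes an unconditional "x |= 8;";
   the individual cases are spelled out below. */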
2644
2645 static int
2646 noce_try_bitop (struct noce_if_info *if_info)
2647 {
2648 rtx cond, x, a, result;
2649 rtx_insn *seq;
2650 machine_mode mode;
2651 enum rtx_code code;
2652 int bitnum;
2653
2654 x = if_info->x;
2655 cond = if_info->cond;
2656 code = GET_CODE (cond);
2657
2658 if (!noce_simple_bbs (if_info))
2659 return FALSE;
2660
2661 /* Check for no else condition, i.e. that B is X itself. */
2662 if (! rtx_equal_p (x, if_info->b))
2663 return FALSE;
2664
2665 /* Check for a suitable condition. */
2666 if (code != NE && code != EQ)
2667 return FALSE;
2668 if (XEXP (cond, 1) != const0_rtx)
2669 return FALSE;
2670 cond = XEXP (cond, 0);
2671
2672 /* ??? We could also handle AND here. */
2673 if (GET_CODE (cond) == ZERO_EXTRACT)
2674 {
2675 if (XEXP (cond, 1) != const1_rtx
2676 || !CONST_INT_P (XEXP (cond, 2))
2677 || ! rtx_equal_p (x, XEXP (cond, 0)))
2678 return FALSE;
2679 bitnum = INTVAL (XEXP (cond, 2));
2680 mode = GET_MODE (x);
2681 if (BITS_BIG_ENDIAN)
2682 bitnum = GET_MODE_BITSIZE (mode) - 1 - bitnum;
2683 if (bitnum < 0 || bitnum >= HOST_BITS_PER_WIDE_INT)
2684 return FALSE;
2685 }
2686 else
2687 return FALSE;
2688
2689 a = if_info->a;
2690 if (GET_CODE (a) == IOR || GET_CODE (a) == XOR)
2691 {
2692 /* Check for "if (X & C) x = x op C". */
2693 if (! rtx_equal_p (x, XEXP (a, 0))
2694 || !CONST_INT_P (XEXP (a, 1))
2695 || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode))
2696 != (unsigned HOST_WIDE_INT) 1 << bitnum)
2697 return FALSE;
2698
2699 /* if ((x & C) == 0) x |= C; is transformed to x |= C. */
2700 /* if ((x & C) != 0) x |= C; is transformed to nothing. */
2701 if (GET_CODE (a) == IOR)
2702 result = (code == NE) ? a : NULL_RTX;
2703 else if (code == NE)
2704 {
2705 /* if ((x & C) == 0) x ^= C; is transformed to x |= C. */
2706 result = gen_int_mode ((HOST_WIDE_INT) 1 << bitnum, mode);
2707 result = simplify_gen_binary (IOR, mode, x, result);
2708 }
2709 else
2710 {
2711 /* if ((x & C) != 0) x ^= C; is transformed to x &= ~C. */
2712 result = gen_int_mode (~((HOST_WIDE_INT) 1 << bitnum), mode);
2713 result = simplify_gen_binary (AND, mode, x, result);
2714 }
2715 }
2716 else if (GET_CODE (a) == AND)
2717 {
2718 /* Check for "if (X & C) x &= ~C". */
2719 if (! rtx_equal_p (x, XEXP (a, 0))
2720 || !CONST_INT_P (XEXP (a, 1))
2721 || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode))
2722 != (~((HOST_WIDE_INT) 1 << bitnum) & GET_MODE_MASK (mode)))
2723 return FALSE;
2724
2725 /* if ((x & C) == 0) x &= ~C; is transformed to nothing. */
2726 /* if ((x & C) != 0) x &= ~C; is transformed to x &= ~C. */
2727 result = (code == EQ) ? a : NULL_RTX;
2728 }
2729 else
2730 return FALSE;
2731
2732 if (result)
2733 {
2734 start_sequence ();
2735 noce_emit_move_insn (x, result);
2736 seq = end_ifcvt_sequence (if_info);
2737 if (!seq)
2738 return FALSE;
2739
2740 emit_insn_before_setloc (seq, if_info->jump,
2741 INSN_LOCATION (if_info->insn_a));
2742 }
2743 return TRUE;
2744 }
2745
2746
2747 /* Similar to get_condition, only the resulting condition must be
2748 valid at JUMP, instead of at EARLIEST.
2749
2750 If THEN_ELSE_REVERSED is true, the fallthrough does not go to the
2751 THEN block of the caller, and we have to reverse the condition. */
2752
2753 static rtx
2754 noce_get_condition (rtx_insn *jump, rtx_insn **earliest, bool then_else_reversed)
2755 {
2756 rtx cond, set, tmp;
2757 bool reverse;
2758
2759 if (! any_condjump_p (jump))
2760 return NULL_RTX;
2761
2762 set = pc_set (jump);
2763
2764 /* If this branches to JUMP_LABEL when the condition is false,
2765 reverse the condition. */
2766 reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
2767 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump));
2768
2769 /* We may have to reverse because the caller's if block is not canonical,
2770 i.e. the THEN block isn't the fallthrough block for the TEST block
2771 (see find_if_header). */
2772 if (then_else_reversed)
2773 reverse = !reverse;
2774
2775 /* If the condition variable is a register and is MODE_INT, accept it. */
2776
2777 cond = XEXP (SET_SRC (set), 0);
2778 tmp = XEXP (cond, 0);
2779 if (REG_P (tmp) && GET_MODE_CLASS (GET_MODE (tmp)) == MODE_INT
2780 && (GET_MODE (tmp) != BImode
2781 || !targetm.small_register_classes_for_mode_p (BImode)))
2782 {
2783 *earliest = jump;
2784
2785 if (reverse)
2786 cond = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
2787 GET_MODE (cond), tmp, XEXP (cond, 1));
2788 return cond;
2789 }
2790
2791 /* Otherwise, fall back on canonicalize_condition to do the dirty
2792 work of manipulating MODE_CC values and COMPARE rtx codes. */
2793 tmp = canonicalize_condition (jump, cond, reverse, earliest,
2794 NULL_RTX, have_cbranchcc4, true);
2795
2796 /* We don't handle side-effects in the condition, like handling
2797 REG_INC notes and making sure no duplicate conditions are emitted. */
2798 if (tmp != NULL_RTX && side_effects_p (tmp))
2799 return NULL_RTX;
2800
2801 return tmp;
2802 }
2803
2804 /* Return true if OP is ok for if-then-else processing. */
2805
2806 static int
2807 noce_operand_ok (const_rtx op)
2808 {
2809 if (side_effects_p (op))
2810 return FALSE;
2811
2812 /* We special-case memories, so handle any of them with
2813 no address side effects. */
2814 if (MEM_P (op))
2815 return ! side_effects_p (XEXP (op, 0));
2816
2817 return ! may_trap_p (op);
2818 }
2819
2820 /* Return true if a write into MEM may trap or fault. */
2821
2822 static bool
2823 noce_mem_write_may_trap_or_fault_p (const_rtx mem)
2824 {
2825 rtx addr;
2826
2827 if (MEM_READONLY_P (mem))
2828 return true;
2829
2830 if (may_trap_or_fault_p (mem))
2831 return true;
2832
2833 addr = XEXP (mem, 0);
2834
2835 /* Call target hook to avoid the effects of -fpic etc. */
2836 addr = targetm.delegitimize_address (addr);
2837
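/* E.g. a store through a LABEL_REF address, or through a SYMBOL_REF
   whose decl lives in a read-only section, is flagged below; forms
   we cannot analyze further (such as a plain REG) fall through to
   "false", since may_trap_or_fault_p above already vetted them. */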
2838 while (addr)
2839 switch (GET_CODE (addr))
2840 {
2841 case CONST:
2842 case PRE_DEC:
2843 case PRE_INC:
2844 case POST_DEC:
2845 case POST_INC:
2846 case POST_MODIFY:
2847 addr = XEXP (addr, 0);
2848 break;
2849 case LO_SUM:
2850 case PRE_MODIFY:
2851 addr = XEXP (addr, 1);
2852 break;
2853 case PLUS:
2854 if (CONST_INT_P (XEXP (addr, 1)))
2855 addr = XEXP (addr, 0);
2856 else
2857 return false;
2858 break;
2859 case LABEL_REF:
2860 return true;
2861 case SYMBOL_REF:
2862 if (SYMBOL_REF_DECL (addr)
2863 && decl_readonly_section (SYMBOL_REF_DECL (addr), 0))
2864 return true;
2865 return false;
2866 default:
2867 return false;
2868 }
2869
2870 return false;
2871 }
2872
2873 /* Return whether we can use store speculation for MEM. TOP_BB is the
2874 basic block above the conditional block where we are considering
2875 doing the speculative store. We look for whether MEM is set
2876 unconditionally later in the function. */
2877
2878 static bool
2879 noce_can_store_speculate_p (basic_block top_bb, const_rtx mem)
2880 {
2881 basic_block dominator;
2882
2883 for (dominator = get_immediate_dominator (CDI_POST_DOMINATORS, top_bb);
2884 dominator != NULL;
2885 dominator = get_immediate_dominator (CDI_POST_DOMINATORS, dominator))
2886 {
2887 rtx_insn *insn;
2888
2889 FOR_BB_INSNS (dominator, insn)
2890 {
2891 /* If we see something that might be a memory barrier, we
2892 have to stop looking. Even if the MEM is set later in
2893 the function, we still don't want to set it
2894 unconditionally before the barrier. */
2895 if (INSN_P (insn)
2896 && (volatile_insn_p (PATTERN (insn))
2897 || (CALL_P (insn) && (!RTL_CONST_CALL_P (insn)))))
2898 return false;
2899
2900 if (memory_must_be_modified_in_insn_p (mem, insn))
2901 return true;
2902 if (modified_in_p (XEXP (mem, 0), insn))
2903 return false;
2904
2905 }
2906 }
2907
2908 return false;
2909 }
2910
2911 /* Return true if X contains a MEM subrtx. */
2912
2913 static bool
2914 contains_mem_rtx_p (rtx x)
2915 {
2916 subrtx_iterator::array_type array;
2917 FOR_EACH_SUBRTX (iter, array, x, ALL)
2918 if (MEM_P (*iter))
2919 return true;
2920
2921 return false;
2922 }
2923
2924 /* Return true iff basic block TEST_BB is valid for noce if-conversion.
2925 The condition used in this if-conversion is in COND.
2926 In practice, check that TEST_BB ends with a single set
2927 x := a and all previous computations
2928 in TEST_BB don't produce any values that are live after TEST_BB.
2929 In other words, all the insns in TEST_BB are there only
2930 to compute a value for x. Put the rtx cost of the insns
2931 in TEST_BB into COST. Record whether TEST_BB is a single simple
2932 set instruction in SIMPLE_P. */
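/* For example (an illustrative sketch), a TEST_BB containing
   t1 = a * b;
   t2 = t1 + c;
   x  = t2;
   is acceptable provided t1 and t2 are dead after the block and do
   not overlap the condition; the whole block then exists only to
   compute a value for x. */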
2933
2934 static bool
2935 bb_valid_for_noce_process_p (basic_block test_bb, rtx cond,
2936 unsigned int *cost, bool *simple_p)
2937 {
2938 if (!test_bb)
2939 return false;
2940
2941 rtx_insn *last_insn = last_active_insn (test_bb, FALSE);
2942 rtx last_set = NULL_RTX;
2943
2944 rtx cc = cc_in_cond (cond);
2945
2946 if (!insn_valid_noce_process_p (last_insn, cc))
2947 return false;
2948 last_set = single_set (last_insn);
2949
2950 rtx x = SET_DEST (last_set);
2951 rtx_insn *first_insn = first_active_insn (test_bb);
2952 rtx first_set = single_set (first_insn);
2953
2954 if (!first_set)
2955 return false;
2956
2957 /* We have a single simple set, that's okay. */
2958 bool speed_p = optimize_bb_for_speed_p (test_bb);
2959
2960 if (first_insn == last_insn)
2961 {
2962 *simple_p = noce_operand_ok (SET_DEST (first_set));
2963 *cost = insn_rtx_cost (first_set, speed_p);
2964 return *simple_p;
2965 }
2966
2967 rtx_insn *prev_last_insn = PREV_INSN (last_insn);
2968 gcc_assert (prev_last_insn);
2969
2970 /* For now, disallow setting x multiple times in test_bb. */
2971 if (REG_P (x) && reg_set_between_p (x, first_insn, prev_last_insn))
2972 return false;
2973
2974 bitmap test_bb_temps = BITMAP_ALLOC (&reg_obstack);
2975
2976 /* The regs that are live out of test_bb. */
2977 bitmap test_bb_live_out = df_get_live_out (test_bb);
2978
2979 int potential_cost = insn_rtx_cost (last_set, speed_p);
2980 rtx_insn *insn;
2981 FOR_BB_INSNS (test_bb, insn)
2982 {
2983 if (insn != last_insn)
2984 {
2985 if (!active_insn_p (insn))
2986 continue;
2987
2988 if (!insn_valid_noce_process_p (insn, cc))
2989 goto free_bitmap_and_fail;
2990
2991 rtx sset = single_set (insn);
2992 gcc_assert (sset);
2993
2994 if (contains_mem_rtx_p (SET_SRC (sset))
2995 || !REG_P (SET_DEST (sset))
2996 || reg_overlap_mentioned_p (SET_DEST (sset), cond))
2997 goto free_bitmap_and_fail;
2998
2999 potential_cost += insn_rtx_cost (sset, speed_p);
3000 bitmap_set_bit (test_bb_temps, REGNO (SET_DEST (sset)));
3001 }
3002 }
3003
3004 /* If any of the intermediate results in test_bb are live after test_bb
3005 then fail. */
3006 if (bitmap_intersect_p (test_bb_live_out, test_bb_temps))
3007 goto free_bitmap_and_fail;
3008
3009 BITMAP_FREE (test_bb_temps);
3010 *cost = potential_cost;
3011 *simple_p = false;
3012 return true;
3013
3014 free_bitmap_and_fail:
3015 BITMAP_FREE (test_bb_temps);
3016 return false;
3017 }
3018
3019 /* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
3020 it without using conditional execution. Return TRUE if we were successful
3021 at converting the block. */
3022
3023 static int
3024 noce_process_if_block (struct noce_if_info *if_info)
3025 {
3026 basic_block test_bb = if_info->test_bb; /* test block */
3027 basic_block then_bb = if_info->then_bb; /* THEN */
3028 basic_block else_bb = if_info->else_bb; /* ELSE or NULL */
3029 basic_block join_bb = if_info->join_bb; /* JOIN */
3030 rtx_insn *jump = if_info->jump;
3031 rtx cond = if_info->cond;
3032 rtx_insn *insn_a, *insn_b;
3033 rtx set_a, set_b;
3034 rtx orig_x, x, a, b;
3035
3036 /* We're looking for patterns of the form
3037
3038 (1) if (...) x = a; else x = b;
3039 (2) x = b; if (...) x = a;
3040 (3) if (...) x = a; // as if with an initial x = x.
3041
3042 The later patterns require jumps to be more expensive.
3043 For the if (...) x = a; else x = b; case we allow multiple insns
3044 inside the then and else blocks as long as their only effect is
3045 to calculate a value for x.
3046 ??? For future expansion, look for multiple X in such patterns. */
3047
3048 if (! bb_valid_for_noce_process_p (then_bb, cond, &if_info->then_cost,
3049 &if_info->then_simple))
3050 return false;
3051
3052 if (else_bb
3053 && ! bb_valid_for_noce_process_p (else_bb, cond, &if_info->else_cost,
3054 &if_info->else_simple))
3055 return false;
3056
3057 insn_a = last_active_insn (then_bb, FALSE);
3058 set_a = single_set (insn_a);
3059 gcc_assert (set_a);
3060
3061 x = SET_DEST (set_a);
3062 a = SET_SRC (set_a);
3063
3064 /* Look for the other potential set. Make sure we've got equivalent
3065 destinations. */
3066 /* ??? This is overconservative. Storing to two different mems is
3067 as easy as conditionally computing the address. Storing to a
3068 single mem merely requires a scratch memory to use as one of the
3069 destination addresses; often the memory immediately below the
3070 stack pointer is available for this. */
3071 set_b = NULL_RTX;
3072 if (else_bb)
3073 {
3074 insn_b = last_active_insn (else_bb, FALSE);
3075 set_b = single_set (insn_b);
3076 gcc_assert (set_b);
3077
3078 if (!rtx_interchangeable_p (x, SET_DEST (set_b)))
3079 return FALSE;
3080 }
3081 else
3082 {
3083 insn_b = prev_nonnote_nondebug_insn (if_info->cond_earliest);
3084 /* We're going to be moving the evaluation of B down from above
3085 COND_EARLIEST to JUMP. Make sure the relevant data is still
3086 intact. */
3087 if (! insn_b
3088 || BLOCK_FOR_INSN (insn_b) != BLOCK_FOR_INSN (if_info->cond_earliest)
3089 || !NONJUMP_INSN_P (insn_b)
3090 || (set_b = single_set (insn_b)) == NULL_RTX
3091 || ! rtx_interchangeable_p (x, SET_DEST (set_b))
3092 || ! noce_operand_ok (SET_SRC (set_b))
3093 || reg_overlap_mentioned_p (x, SET_SRC (set_b))
3094 || modified_between_p (SET_SRC (set_b), insn_b, jump)
3095 /* Avoid extending the lifetime of hard registers on small
3096 register class machines. */
3097 || (REG_P (SET_SRC (set_b))
3098 && HARD_REGISTER_P (SET_SRC (set_b))
3099 && targetm.small_register_classes_for_mode_p
3100 (GET_MODE (SET_SRC (set_b))))
3101 /* Likewise with X. In particular this can happen when
3102 noce_get_condition looks farther back in the instruction
3103 stream than one might expect. */
3104 || reg_overlap_mentioned_p (x, cond)
3105 || reg_overlap_mentioned_p (x, a)
3106 || modified_between_p (x, insn_b, jump))
3107 {
3108 insn_b = NULL;
3109 set_b = NULL_RTX;
3110 }
3111 }
3112
3113 /* If x has side effects then only the if-then-else form is safe to
3114 convert. But even in that case we would need to restore any notes
3115 (such as REG_INC) at the end. That can be tricky if
3116 noce_emit_move_insn expands to more than one insn, so disable the
3117 optimization entirely for now if there are side effects. */
3118 if (side_effects_p (x))
3119 return FALSE;
3120
3121 b = (set_b ? SET_SRC (set_b) : x);
3122
3123 /* Only operate on register destinations, and even then avoid extending
3124 the lifetime of hard registers on small register class machines. */
3125 orig_x = x;
3126 if (!REG_P (x)
3127 || (HARD_REGISTER_P (x)
3128 && targetm.small_register_classes_for_mode_p (GET_MODE (x))))
3129 {
3130 if (GET_MODE (x) == BLKmode)
3131 return FALSE;
3132
3133 if (GET_CODE (x) == ZERO_EXTRACT
3134 && (!CONST_INT_P (XEXP (x, 1))
3135 || !CONST_INT_P (XEXP (x, 2))))
3136 return FALSE;
3137
3138 x = gen_reg_rtx (GET_MODE (GET_CODE (x) == STRICT_LOW_PART
3139 ? XEXP (x, 0) : x));
3140 }
3141
3142 /* Don't operate on sources that may trap or are volatile. */
3143 if (! noce_operand_ok (a) || ! noce_operand_ok (b))
3144 return FALSE;
3145
3146 retry:
3147 /* Set up the info block for our subroutines. */
3148 if_info->insn_a = insn_a;
3149 if_info->insn_b = insn_b;
3150 if_info->x = x;
3151 if_info->a = a;
3152 if_info->b = b;
3153
3154 /* Try optimizations in some approximation of a useful order. */
3155 /* ??? Should first look to see if X is live incoming at all. If it
3156 isn't, we don't need anything but an unconditional set. */
3157
3158 /* Look and see if A and B are really the same. Avoid creating silly
3159 cmove constructs that no one will fix up later. */
3160 if (noce_simple_bbs (if_info)
3161 && rtx_interchangeable_p (a, b))
3162 {
3163 /* If we have an INSN_B, we don't have to create any new rtl. Just
3164 move the instruction that we already have. If we don't have an
3165 INSN_B, that means that A == X, and we've got a noop move. In
3166 that case don't do anything and let the code below delete INSN_A. */
3167 if (insn_b && else_bb)
3168 {
3169 rtx note;
3170
3171 if (else_bb && insn_b == BB_END (else_bb))
3172 BB_END (else_bb) = PREV_INSN (insn_b);
3173 reorder_insns (insn_b, insn_b, PREV_INSN (jump));
3174
3175 /* If there was a REG_EQUAL note, delete it since it may have been
3176 true due to this insn being after a jump. */
3177 if ((note = find_reg_note (insn_b, REG_EQUAL, NULL_RTX)) != 0)
3178 remove_note (insn_b, note);
3179
3180 insn_b = NULL;
3181 }
3182 /* If we have "x = b; if (...) x = a;", and x has side-effects, then
3183 x must be executed twice. */
3184 else if (insn_b && side_effects_p (orig_x))
3185 return FALSE;
3186
3187 x = orig_x;
3188 goto success;
3189 }
3190
3191 if (!set_b && MEM_P (orig_x))
3192 {
3193 /* Disallow the "if (...) x = a;" form (implicit "else x = x;")
3194 for optimizations if writing to x may trap or fault,
3195 i.e. it's a memory other than a static var or a stack slot,
3196 is misaligned on strict aligned machines or is read-only. If
3197 x is a read-only memory, then the program is valid only if we
3198 avoid the store into it. If there are stores on both the
3199 THEN and ELSE arms, then we can go ahead with the conversion;
3200 either the program is broken, or the condition is always
3201 false such that the other memory is selected. */
3202 if (noce_mem_write_may_trap_or_fault_p (orig_x))
3203 return FALSE;
3204
3205 /* Avoid store speculation: given "if (...) x = a" where x is a
3206 MEM, we only want to do the store if x is always set
3207 somewhere in the function. This avoids cases like
3208 if (pthread_mutex_trylock(mutex))
3209 ++global_variable;
3210 where we only want global_variable to be changed if the mutex
3211 is held. FIXME: This should ideally be expressed directly in
3212 RTL somehow. */
3213 if (!noce_can_store_speculate_p (test_bb, orig_x))
3214 return FALSE;
3215 }
3216
3217 if (noce_try_move (if_info))
3218 goto success;
3219 if (noce_try_store_flag (if_info))
3220 goto success;
3221 if (noce_try_bitop (if_info))
3222 goto success;
3223 if (noce_try_minmax (if_info))
3224 goto success;
3225 if (noce_try_abs (if_info))
3226 goto success;
3227 if (!targetm.have_conditional_execution ()
3228 && noce_try_store_flag_constants (if_info))
3229 goto success;
3230 if (HAVE_conditional_move
3231 && noce_try_cmove (if_info))
3232 goto success;
3233 if (! targetm.have_conditional_execution ())
3234 {
3235 if (noce_try_addcc (if_info))
3236 goto success;
3237 if (noce_try_store_flag_mask (if_info))
3238 goto success;
3239 if (HAVE_conditional_move
3240 && noce_try_cmove_arith (if_info))
3241 goto success;
3242 if (noce_try_sign_mask (if_info))
3243 goto success;
3244 }
3245
3246 if (!else_bb && set_b)
3247 {
3248 insn_b = NULL;
3249 set_b = NULL_RTX;
3250 b = orig_x;
3251 goto retry;
3252 }
3253
3254 return FALSE;
3255
3256 success:
3257
3258 /* If we used a temporary, fix it up now. */
3259 if (orig_x != x)
3260 {
3261 rtx_insn *seq;
3262
3263 start_sequence ();
3264 noce_emit_move_insn (orig_x, x);
3265 seq = get_insns ();
3266 set_used_flags (orig_x);
3267 unshare_all_rtl_in_chain (seq);
3268 end_sequence ();
3269
3270 emit_insn_before_setloc (seq, BB_END (test_bb), INSN_LOCATION (insn_a));
3271 }
3272
3273 /* The original THEN and ELSE blocks may now be removed. The test block
3274 must now jump to the join block. If the test block and the join block
3275 can be merged, do so. */
3276 if (else_bb)
3277 {
3278 delete_basic_block (else_bb);
3279 num_true_changes++;
3280 }
3281 else
3282 remove_edge (find_edge (test_bb, join_bb));
3283
3284 remove_edge (find_edge (then_bb, join_bb));
3285 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3286 delete_basic_block (then_bb);
3287 num_true_changes++;
3288
3289 if (can_merge_blocks_p (test_bb, join_bb))
3290 {
3291 merge_blocks (test_bb, join_bb);
3292 num_true_changes++;
3293 }
3294
3295 num_updated_if_blocks++;
3296 return TRUE;
3297 }
3298
3299 /* Check whether a block is suitable for conditional move conversion.
3300 Every insn must be a simple set of a register to a constant or a
3301 register. For each assignment, store the value in the pointer map
3302 VALS, keyed by register pointer, then store the register
3303 pointer in REGS. COND is the condition we will test. */
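/* For example (illustrative), a block consisting of
   x = 1;
   y = a;
   qualifies, while a block containing "x = *p;" does not, since
   each source must be a constant or a register. */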
3304
3305 static int
3306 check_cond_move_block (basic_block bb,
3307 hash_map<rtx, rtx> *vals,
3308 vec<rtx> *regs,
3309 rtx cond)
3310 {
3311 rtx_insn *insn;
3312 rtx cc = cc_in_cond (cond);
3313
3314 /* We can only handle simple jumps at the end of the basic block.
3315 It is almost impossible to update the CFG otherwise. */
3316 insn = BB_END (bb);
3317 if (JUMP_P (insn) && !onlyjump_p (insn))
3318 return FALSE;
3319
3320 FOR_BB_INSNS (bb, insn)
3321 {
3322 rtx set, dest, src;
3323
3324 if (!NONDEBUG_INSN_P (insn) || JUMP_P (insn))
3325 continue;
3326 set = single_set (insn);
3327 if (!set)
3328 return FALSE;
3329
3330 dest = SET_DEST (set);
3331 src = SET_SRC (set);
3332 if (!REG_P (dest)
3333 || (HARD_REGISTER_P (dest)
3334 && targetm.small_register_classes_for_mode_p (GET_MODE (dest))))
3335 return FALSE;
3336
3337 if (!CONSTANT_P (src) && !register_operand (src, VOIDmode))
3338 return FALSE;
3339
3340 if (side_effects_p (src) || side_effects_p (dest))
3341 return FALSE;
3342
3343 if (may_trap_p (src) || may_trap_p (dest))
3344 return FALSE;
3345
3346 /* Don't try to handle this if the source register was
3347 modified earlier in the block. */
3348 if ((REG_P (src)
3349 && vals->get (src))
3350 || (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src))
3351 && vals->get (SUBREG_REG (src))))
3352 return FALSE;
3353
3354 /* Don't try to handle this if the destination register was
3355 modified earlier in the block. */
3356 if (vals->get (dest))
3357 return FALSE;
3358
3359 /* Don't try to handle this if the condition uses the
3360 destination register. */
3361 if (reg_overlap_mentioned_p (dest, cond))
3362 return FALSE;
3363
3364 /* Don't try to handle this if the source register is modified
3365 later in the block. */
3366 if (!CONSTANT_P (src)
3367 && modified_between_p (src, insn, NEXT_INSN (BB_END (bb))))
3368 return FALSE;
3369
3370 /* Skip it if the instruction to be moved might clobber CC. */
3371 if (cc && set_of (cc, insn))
3372 return FALSE;
3373
3374 vals->put (dest, src);
3375
3376 regs->safe_push (dest);
3377 }
3378
3379 return TRUE;
3380 }
3381
3382 /* Given a basic block BB suitable for conditional move conversion,
3383 a condition COND, and pointer maps THEN_VALS and ELSE_VALS containing
3384 the register values depending on COND, emit the insns in the block as
3385 conditional moves. If ELSE_BLOCK_P is true, THEN_BB was already
3386 processed. The caller has started a sequence for the conversion.
3387 Return true if successful, false if something goes wrong. */
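/* E.g. (illustrative) for "if (test) { x = 1; y = a; }" with no
   ELSE block, the THEN pass emits the equivalent of
   x = test ? 1 : x;
   y = test ? a : y;
   leaving each register unchanged on the untaken arm. */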
3388
3389 static bool
3390 cond_move_convert_if_block (struct noce_if_info *if_infop,
3391 basic_block bb, rtx cond,
3392 hash_map<rtx, rtx> *then_vals,
3393 hash_map<rtx, rtx> *else_vals,
3394 bool else_block_p)
3395 {
3396 enum rtx_code code;
3397 rtx_insn *insn;
3398 rtx cond_arg0, cond_arg1;
3399
3400 code = GET_CODE (cond);
3401 cond_arg0 = XEXP (cond, 0);
3402 cond_arg1 = XEXP (cond, 1);
3403
3404 FOR_BB_INSNS (bb, insn)
3405 {
3406 rtx set, target, dest, t, e;
3407
3408 /* ??? Maybe emit conditional debug insn? */
3409 if (!NONDEBUG_INSN_P (insn) || JUMP_P (insn))
3410 continue;
3411 set = single_set (insn);
3412 gcc_assert (set && REG_P (SET_DEST (set)));
3413
3414 dest = SET_DEST (set);
3415
3416 rtx *then_slot = then_vals->get (dest);
3417 rtx *else_slot = else_vals->get (dest);
3418 t = then_slot ? *then_slot : NULL_RTX;
3419 e = else_slot ? *else_slot : NULL_RTX;
3420
3421 if (else_block_p)
3422 {
3423 /* If this register was set in the then block, we already
3424 handled this case there. */
3425 if (t)
3426 continue;
3427 t = dest;
3428 gcc_assert (e);
3429 }
3430 else
3431 {
3432 gcc_assert (t);
3433 if (!e)
3434 e = dest;
3435 }
3436
3437 target = noce_emit_cmove (if_infop, dest, code, cond_arg0, cond_arg1,
3438 t, e);
3439 if (!target)
3440 return false;
3441
3442 if (target != dest)
3443 noce_emit_move_insn (dest, target);
3444 }
3445
3446 return true;
3447 }
3448
3449 /* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
3450 it using only conditional moves. Return TRUE if we were successful at
3451 converting the block. */
3452
3453 static int
3454 cond_move_process_if_block (struct noce_if_info *if_info)
3455 {
3456 basic_block test_bb = if_info->test_bb;
3457 basic_block then_bb = if_info->then_bb;
3458 basic_block else_bb = if_info->else_bb;
3459 basic_block join_bb = if_info->join_bb;
3460 rtx_insn *jump = if_info->jump;
3461 rtx cond = if_info->cond;
3462 rtx_insn *seq, *loc_insn;
3463 rtx reg;
3464 int c;
3465 vec<rtx> then_regs = vNULL;
3466 vec<rtx> else_regs = vNULL;
3467 unsigned int i;
3468 int success_p = FALSE;
3469
3470 /* Build a mapping for each block to the value used for each
3471 register. */
3472 hash_map<rtx, rtx> then_vals;
3473 hash_map<rtx, rtx> else_vals;
3474
3475 /* Make sure the blocks are suitable. */
3476 if (!check_cond_move_block (then_bb, &then_vals, &then_regs, cond)
3477 || (else_bb
3478 && !check_cond_move_block (else_bb, &else_vals, &else_regs, cond)))
3479 goto done;
3480
3481 /* Make sure the blocks can be used together. If the same register
3482 is set in both blocks, and is not set to a constant in both
3483 cases, then both blocks must set it to the same register. We
3484 have already verified that if it is set to a register, that the
3485 source register does not change after the assignment. Also count
3486 the number of registers set in only one of the blocks. */
3487 c = 0;
3488 FOR_EACH_VEC_ELT (then_regs, i, reg)
3489 {
3490 rtx *then_slot = then_vals.get (reg);
3491 rtx *else_slot = else_vals.get (reg);
3492
3493 gcc_checking_assert (then_slot);
3494 if (!else_slot)
3495 ++c;
3496 else
3497 {
3498 rtx then_val = *then_slot;
3499 rtx else_val = *else_slot;
3500 if (!CONSTANT_P (then_val) && !CONSTANT_P (else_val)
3501 && !rtx_equal_p (then_val, else_val))
3502 goto done;
3503 }
3504 }
3505
3506 /* Finish off c for MAX_CONDITIONAL_EXECUTE. */
3507 FOR_EACH_VEC_ELT (else_regs, i, reg)
3508 {
3509 gcc_checking_assert (else_vals.get (reg));
3510 if (!then_vals.get (reg))
3511 ++c;
3512 }
3513
3514 /* Make sure it is reasonable to convert this block. What matters
3515 is the number of assignments currently made in only one of the
3516 branches, since if we convert we are going to always execute
3517 them. */
3518 if (c > MAX_CONDITIONAL_EXECUTE)
3519 goto done;
3520
3521 /* Try to emit the conditional moves. First do the then block,
3522 then do anything left in the else blocks. */
3523 start_sequence ();
3524 if (!cond_move_convert_if_block (if_info, then_bb, cond,
3525 &then_vals, &else_vals, false)
3526 || (else_bb
3527 && !cond_move_convert_if_block (if_info, else_bb, cond,
3528 &then_vals, &else_vals, true)))
3529 {
3530 end_sequence ();
3531 goto done;
3532 }
3533 seq = end_ifcvt_sequence (if_info);
3534 if (!seq)
3535 goto done;
3536
3537 loc_insn = first_active_insn (then_bb);
3538 if (!loc_insn)
3539 {
3540 loc_insn = first_active_insn (else_bb);
3541 gcc_assert (loc_insn);
3542 }
3543 emit_insn_before_setloc (seq, jump, INSN_LOCATION (loc_insn));
3544
3545 if (else_bb)
3546 {
3547 delete_basic_block (else_bb);
3548 num_true_changes++;
3549 }
3550 else
3551 remove_edge (find_edge (test_bb, join_bb));
3552
3553 remove_edge (find_edge (then_bb, join_bb));
3554 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3555 delete_basic_block (then_bb);
3556 num_true_changes++;
3557
3558 if (can_merge_blocks_p (test_bb, join_bb))
3559 {
3560 merge_blocks (test_bb, join_bb);
3561 num_true_changes++;
3562 }
3563
3564 num_updated_if_blocks++;
3565
3566 success_p = TRUE;
3567
3568 done:
3569 then_regs.release ();
3570 else_regs.release ();
3571 return success_p;
3572 }
3573
3574 \f
3575 /* Determine if a given basic block heads a simple IF-THEN-JOIN or an
3576 IF-THEN-ELSE-JOIN block.
3577
3578 If so, we'll try to convert the insns to not require the branch,
3579 using only transformations that do not require conditional execution.
3580
3581 Return TRUE if we were successful at converting the block. */
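/* Sketch of the shapes we recognize (illustrative):

   IF-THEN-ELSE-JOIN:        IF-THEN-JOIN:

       test_bb                  test_bb
       /     \                  /     \
   then_bb  else_bb         then_bb    \
       \     /                  \      /
       join_bb                  join_bb

   plus an IF-ELSE-JOIN variant, handled below by swapping the
   blocks and reversing the condition. */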
3582
3583 static int
3584 noce_find_if_block (basic_block test_bb, edge then_edge, edge else_edge,
3585 int pass)
3586 {
3587 basic_block then_bb, else_bb, join_bb;
3588 bool then_else_reversed = false;
3589 rtx_insn *jump;
3590 rtx cond;
3591 rtx_insn *cond_earliest;
3592 struct noce_if_info if_info;
3593
3594 /* We should only ever get here before reload. */
3595 gcc_assert (!reload_completed);
3596
3597 /* Recognize an IF-THEN-ELSE-JOIN block. */
3598 if (single_pred_p (then_edge->dest)
3599 && single_succ_p (then_edge->dest)
3600 && single_pred_p (else_edge->dest)
3601 && single_succ_p (else_edge->dest)
3602 && single_succ (then_edge->dest) == single_succ (else_edge->dest))
3603 {
3604 then_bb = then_edge->dest;
3605 else_bb = else_edge->dest;
3606 join_bb = single_succ (then_bb);
3607 }
3608 /* Recognize an IF-THEN-JOIN block. */
3609 else if (single_pred_p (then_edge->dest)
3610 && single_succ_p (then_edge->dest)
3611 && single_succ (then_edge->dest) == else_edge->dest)
3612 {
3613 then_bb = then_edge->dest;
3614 else_bb = NULL_BLOCK;
3615 join_bb = else_edge->dest;
3616 }
3617 /* Recognize an IF-ELSE-JOIN block. We can have those because the order
3618 of basic blocks in cfglayout mode does not matter, so the fallthrough
3619 edge can go to any basic block (and not just to bb->next_bb, like in
3620 cfgrtl mode). */
3621 else if (single_pred_p (else_edge->dest)
3622 && single_succ_p (else_edge->dest)
3623 && single_succ (else_edge->dest) == then_edge->dest)
3624 {
3625 /* The noce transformations do not apply to IF-ELSE-JOIN blocks.
3626 To make this work, we have to invert the THEN and ELSE blocks
3627 and reverse the jump condition. */
3628 then_bb = else_edge->dest;
3629 else_bb = NULL_BLOCK;
3630 join_bb = single_succ (then_bb);
3631 then_else_reversed = true;
3632 }
3633 else
3634 /* Not a form we can handle. */
3635 return FALSE;
3636
3637 /* The edges of the THEN and ELSE blocks cannot have complex edges. */
3638 if (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
3639 return FALSE;
3640 if (else_bb
3641 && single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
3642 return FALSE;
3643
3644 num_possible_if_blocks++;
3645
3646 if (dump_file)
3647 {
3648 fprintf (dump_file,
3649 "\nIF-THEN%s-JOIN block found, pass %d, test %d, then %d",
3650 (else_bb) ? "-ELSE" : "",
3651 pass, test_bb->index, then_bb->index);
3652
3653 if (else_bb)
3654 fprintf (dump_file, ", else %d", else_bb->index);
3655
3656 fprintf (dump_file, ", join %d\n", join_bb->index);
3657 }
3658
3659 /* If the conditional jump is more than just a conditional
3660 jump, then we cannot do if-conversion on this block. */
3661 jump = BB_END (test_bb);
3662 if (! onlyjump_p (jump))
3663 return FALSE;
3664
3665 /* If this is not a standard conditional jump, we can't parse it. */
3666 cond = noce_get_condition (jump, &cond_earliest, then_else_reversed);
3667 if (!cond)
3668 return FALSE;
3669
3670 /* We must be comparing objects whose modes imply the size. */
3671 if (GET_MODE (XEXP (cond, 0)) == BLKmode)
3672 return FALSE;
3673
3674 /* Initialize an IF_INFO struct to pass around. */
3675 memset (&if_info, 0, sizeof if_info);
3676 if_info.test_bb = test_bb;
3677 if_info.then_bb = then_bb;
3678 if_info.else_bb = else_bb;
3679 if_info.join_bb = join_bb;
3680 if_info.cond = cond;
3681 if_info.cond_earliest = cond_earliest;
3682 if_info.jump = jump;
3683 if_info.then_else_reversed = then_else_reversed;
3684 if_info.branch_cost = BRANCH_COST (optimize_bb_for_speed_p (test_bb),
3685 predictable_edge_p (then_edge));
3686
3687 /* Do the real work. */
3688
3689 if (noce_process_if_block (&if_info))
3690 return TRUE;
3691
3692 if (HAVE_conditional_move
3693 && cond_move_process_if_block (&if_info))
3694 return TRUE;
3695
3696 return FALSE;
3697 }
3698 \f
3699
3700 /* Merge the blocks and mark for local life update. */
3701
3702 static void
3703 merge_if_block (struct ce_if_block * ce_info)
3704 {
3705 basic_block test_bb = ce_info->test_bb; /* last test block */
3706 basic_block then_bb = ce_info->then_bb; /* THEN */
3707 basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */
3708 basic_block join_bb = ce_info->join_bb; /* join block */
3709 basic_block combo_bb;
3710
3711 /* All block merging is done into the lower block numbers. */
3712
3713 combo_bb = test_bb;
3714 df_set_bb_dirty (test_bb);
3715
3716 /* Merge any basic blocks to handle && and || subtests. Each of
3717 the blocks are on the fallthru path from the predecessor block. */
3718 if (ce_info->num_multiple_test_blocks > 0)
3719 {
3720 basic_block bb = test_bb;
3721 basic_block last_test_bb = ce_info->last_test_bb;
3722 basic_block fallthru = block_fallthru (bb);
3723
3724 do
3725 {
3726 bb = fallthru;
3727 fallthru = block_fallthru (bb);
3728 merge_blocks (combo_bb, bb);
3729 num_true_changes++;
3730 }
3731 while (bb != last_test_bb);
3732 }
3733
3734 /* Merge TEST block into THEN block. Normally the THEN block won't have a
3735 label, but it might if there were || tests. That label's count should be
3736 zero, and it normally should be removed. */
3737
3738 if (then_bb)
3739 {
3740 /* If THEN_BB has no successors, then there's a BARRIER after it.
3741 If COMBO_BB has more than one successor (THEN_BB), then that BARRIER
3742 is no longer needed, and in fact it is incorrect to leave it in
3743 the insn stream. */
3744 if (EDGE_COUNT (then_bb->succs) == 0
3745 && EDGE_COUNT (combo_bb->succs) > 1)
3746 {
3747 rtx_insn *end = NEXT_INSN (BB_END (then_bb));
3748 while (end && NOTE_P (end) && !NOTE_INSN_BASIC_BLOCK_P (end))
3749 end = NEXT_INSN (end);
3750
3751 if (end && BARRIER_P (end))
3752 delete_insn (end);
3753 }
3754 merge_blocks (combo_bb, then_bb);
3755 num_true_changes++;
3756 }
3757
3758 /* The ELSE block, if it existed, had a label. That label count
3759 will almost always be zero, but odd things can happen when labels
3760 get their addresses taken. */
3761 if (else_bb)
3762 {
3763 /* If ELSE_BB has no successors, then there's a BARRIER after it.
3764 If COMBO_BB has more than one successor (ELSE_BB), then that BARRIER
3765 is no longer needed, and in fact it is incorrect to leave it in
3766 the insn stream. */
3767 if (EDGE_COUNT (else_bb->succs) == 0
3768 && EDGE_COUNT (combo_bb->succs) > 1)
3769 {
3770 rtx_insn *end = NEXT_INSN (BB_END (else_bb));
3771 while (end && NOTE_P (end) && !NOTE_INSN_BASIC_BLOCK_P (end))
3772 end = NEXT_INSN (end);
3773
3774 if (end && BARRIER_P (end))
3775 delete_insn (end);
3776 }
3777 merge_blocks (combo_bb, else_bb);
3778 num_true_changes++;
3779 }
3780
3781 /* If there was no join block reported, that means it was not adjacent
3782 to the others, and so we cannot merge them. */
3783
3784 if (! join_bb)
3785 {
3786 rtx_insn *last = BB_END (combo_bb);
3787
3788 /* The outgoing edge for the current COMBO block should already
3789 be correct. Verify this. */
3790 if (EDGE_COUNT (combo_bb->succs) == 0)
3791 gcc_assert (find_reg_note (last, REG_NORETURN, NULL)
3792 || (NONJUMP_INSN_P (last)
3793 && GET_CODE (PATTERN (last)) == TRAP_IF
3794 && (TRAP_CONDITION (PATTERN (last))
3795 == const_true_rtx)));
3796
3797 else
3798 /* There should still be something at the end of the THEN or ELSE
3799 blocks taking us to our final destination. */
3800 gcc_assert (JUMP_P (last)
3801 || (EDGE_SUCC (combo_bb, 0)->dest
3802 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3803 && CALL_P (last)
3804 && SIBLING_CALL_P (last))
3805 || ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH)
3806 && can_throw_internal (last)));
3807 }
3808
3809 /* The JOIN block may have had quite a number of other predecessors too.
3810 Since we've already merged the TEST, THEN and ELSE blocks, we should
3811 have only one remaining edge from our if-then-else diamond. If there
3812 is more than one remaining edge, it must come from elsewhere. There
3813 may be zero incoming edges if the THEN block didn't actually join
3814 back up (as with a call to a non-return function). */
3815 else if (EDGE_COUNT (join_bb->preds) < 2
3816 && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3817 {
3818 /* We can merge the JOIN cleanly and update the dataflow, then try
3819 again on this pass. */
3820 merge_blocks (combo_bb, join_bb);
3821 num_true_changes++;
3822 }
3823 else
3824 {
3825 /* We cannot merge the JOIN. */
3826
3827 /* The outgoing edge for the current COMBO block should already
3828 be correct. Verify this. */
3829 gcc_assert (single_succ_p (combo_bb)
3830 && single_succ (combo_bb) == join_bb);
3831
3832 /* Remove the jump and cruft from the end of the COMBO block. */
3833 if (join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3834 tidy_fallthru_edge (single_succ_edge (combo_bb));
3835 }
3836
3837 num_updated_if_blocks++;
3838 }
3839 \f
3840 /* Find a block ending in a simple IF condition and try to transform it
3841 in some way. When converting a multi-block condition, put the new code
3842 in the first such block and delete the rest. Return a pointer to this
3843 first block if some transformation was done. Return NULL otherwise. */
3844
3845 static basic_block
3846 find_if_header (basic_block test_bb, int pass)
3847 {
3848 ce_if_block ce_info;
3849 edge then_edge;
3850 edge else_edge;
3851
3852 /* The kind of block we're looking for has exactly two successors. */
3853 if (EDGE_COUNT (test_bb->succs) != 2)
3854 return NULL;
3855
3856 then_edge = EDGE_SUCC (test_bb, 0);
3857 else_edge = EDGE_SUCC (test_bb, 1);
3858
3859 if (df_get_bb_dirty (then_edge->dest))
3860 return NULL;
3861 if (df_get_bb_dirty (else_edge->dest))
3862 return NULL;
3863
3864 /* Neither edge should be abnormal. */
3865 if ((then_edge->flags & EDGE_COMPLEX)
3866 || (else_edge->flags & EDGE_COMPLEX))
3867 return NULL;
3868
3869 /* Nor should either edge exit a loop. */
3870 if ((then_edge->flags & EDGE_LOOP_EXIT)
3871 || (else_edge->flags & EDGE_LOOP_EXIT))
3872 return NULL;
3873
3874 /* The THEN edge is canonically the one that falls through. */
3875 if (then_edge->flags & EDGE_FALLTHRU)
3876 ;
3877 else if (else_edge->flags & EDGE_FALLTHRU)
3878 std::swap (then_edge, else_edge);
3879 else
3880 /* Otherwise this must be a multiway branch of some sort. */
3881 return NULL;
3882
3883 memset (&ce_info, 0, sizeof (ce_info));
3884 ce_info.test_bb = test_bb;
3885 ce_info.then_bb = then_edge->dest;
3886 ce_info.else_bb = else_edge->dest;
3887 ce_info.pass = pass;
3888
3889 #ifdef IFCVT_MACHDEP_INIT
3890 IFCVT_MACHDEP_INIT (&ce_info);
3891 #endif
3892
3893 if (!reload_completed
3894 && noce_find_if_block (test_bb, then_edge, else_edge, pass))
3895 goto success;
3896
3897 if (reload_completed
3898 && targetm.have_conditional_execution ()
3899 && cond_exec_find_if_block (&ce_info))
3900 goto success;
3901
3902 if (targetm.have_trap ()
3903 && optab_handler (ctrap_optab, word_mode) != CODE_FOR_nothing
3904 && find_cond_trap (test_bb, then_edge, else_edge))
3905 goto success;
3906
3907 if (dom_info_state (CDI_POST_DOMINATORS) >= DOM_NO_FAST_QUERY
3908 && (reload_completed || !targetm.have_conditional_execution ()))
3909 {
3910 if (find_if_case_1 (test_bb, then_edge, else_edge))
3911 goto success;
3912 if (find_if_case_2 (test_bb, then_edge, else_edge))
3913 goto success;
3914 }
3915
3916 return NULL;
3917
3918 success:
3919 if (dump_file)
3920 fprintf (dump_file, "Conversion succeeded on pass %d.\n", pass);
3921 /* Set this so we continue looking. */
3922 cond_exec_changed_p = TRUE;
3923 return ce_info.test_bb;
3924 }
3925
3926 /* Check whether a block has two edges, one of which falls through to the
3927 next block and the other of which jumps to TARGET_BB, so that we can tell
3928 whether the block is part of an && test or an || test. Return -1 if not;
3929 otherwise return the number of non-note, non-jump, non-USE/CLOBBER insns. */
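/* For example (illustrative only), in

       if (a && b) x = y;

   the block testing "a" ends in a conditional jump to the ELSE/join
   block and falls through to the block testing "b", so it is recognized
   here as an && block; for "if (a || b)" the jump would instead target
   the THEN block.  */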
3930
3931 static int
3932 block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb)
3933 {
3934 edge cur_edge;
3935 int fallthru_p = FALSE;
3936 int jump_p = FALSE;
3937 rtx_insn *insn;
3938 rtx_insn *end;
3939 int n_insns = 0;
3940 edge_iterator ei;
3941
3942 if (!cur_bb || !target_bb)
3943 return -1;
3944
3945 /* If no edges, obviously it doesn't jump or fallthru. */
3946 if (EDGE_COUNT (cur_bb->succs) == 0)
3947 return FALSE;
3948
3949 FOR_EACH_EDGE (cur_edge, ei, cur_bb->succs)
3950 {
3951 if (cur_edge->flags & EDGE_COMPLEX)
3952 /* Anything complex isn't what we want. */
3953 return -1;
3954
3955 else if (cur_edge->flags & EDGE_FALLTHRU)
3956 fallthru_p = TRUE;
3957
3958 else if (cur_edge->dest == target_bb)
3959 jump_p = TRUE;
3960
3961 else
3962 return -1;
3963 }
3964
3965 if (!(jump_p && fallthru_p))
3966 return -1;
3967
3968 /* Don't allow calls in the block, since this is used to group && and ||
3969 together for conditional execution support. ??? We should support
3970 conditional execution across calls for IA-64 some day, but for now
3971 leaving them out keeps the code simpler. */
3972 end = BB_END (cur_bb);
3973 insn = BB_HEAD (cur_bb);
3974
3975 while (insn != NULL_RTX)
3976 {
3977 if (CALL_P (insn))
3978 return -1;
3979
3980 if (INSN_P (insn)
3981 && !JUMP_P (insn)
3982 && !DEBUG_INSN_P (insn)
3983 && GET_CODE (PATTERN (insn)) != USE
3984 && GET_CODE (PATTERN (insn)) != CLOBBER)
3985 n_insns++;
3986
3987 if (insn == end)
3988 break;
3989
3990 insn = NEXT_INSN (insn);
3991 }
3992
3993 return n_insns;
3994 }
3995
3996 /* Determine if a given basic block heads a simple IF-THEN or IF-THEN-ELSE
3997 block. If so, we'll try to convert the insns to not require the branch.
3998 Return TRUE if we were successful at converting the block. */
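/* E.g. on a conditionally executing target such as ARM (a rough sketch,
   not literal output of this pass),

       if (a == b) x = 1; else x = 2;

   can become the branch-free sequence

       cmp   a, b
       moveq x, #1
       movne x, #2

   once the IF-THEN-ELSE structure below has been identified.  */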
3999
4000 static int
4001 cond_exec_find_if_block (struct ce_if_block * ce_info)
4002 {
4003 basic_block test_bb = ce_info->test_bb;
4004 basic_block then_bb = ce_info->then_bb;
4005 basic_block else_bb = ce_info->else_bb;
4006 basic_block join_bb = NULL_BLOCK;
4007 edge cur_edge;
4008 basic_block next;
4009 edge_iterator ei;
4010
4011 ce_info->last_test_bb = test_bb;
4012
4013 /* We should only ever get here after reload,
4014 and only if the target has conditional execution. */
4015 gcc_assert (reload_completed && targetm.have_conditional_execution ());
4016
4017 /* Discover if any fall through predecessors of the current test basic block
4018 were && tests (which jump to the else block) or || tests (which jump to
4019 the then block). */
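/* For example (illustrative only), "if (a && b && c) x = y;" gives two
   && blocks feeding the final test block; the loop below walks those
   fallthru predecessors so that ce_info->test_bb ends up at the block
   testing "a" while last_test_bb remains at the block testing "c".  */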
4020 if (single_pred_p (test_bb)
4021 && single_pred_edge (test_bb)->flags == EDGE_FALLTHRU)
4022 {
4023 basic_block bb = single_pred (test_bb);
4024 basic_block target_bb;
4025 int max_insns = MAX_CONDITIONAL_EXECUTE;
4026 int n_insns;
4027
4028 /* Determine if the preceding block is an && or || block. */
4029 if ((n_insns = block_jumps_and_fallthru_p (bb, else_bb)) >= 0)
4030 {
4031 ce_info->and_and_p = TRUE;
4032 target_bb = else_bb;
4033 }
4034 else if ((n_insns = block_jumps_and_fallthru_p (bb, then_bb)) >= 0)
4035 {
4036 ce_info->and_and_p = FALSE;
4037 target_bb = then_bb;
4038 }
4039 else
4040 target_bb = NULL_BLOCK;
4041
4042 if (target_bb && n_insns <= max_insns)
4043 {
4044 int total_insns = 0;
4045 int blocks = 0;
4046
4047 ce_info->last_test_bb = test_bb;
4048
4049 /* Found at least one && or || block, look for more. */
4050 do
4051 {
4052 ce_info->test_bb = test_bb = bb;
4053 total_insns += n_insns;
4054 blocks++;
4055
4056 if (!single_pred_p (bb))
4057 break;
4058
4059 bb = single_pred (bb);
4060 n_insns = block_jumps_and_fallthru_p (bb, target_bb);
4061 }
4062 while (n_insns >= 0 && (total_insns + n_insns) <= max_insns);
4063
4064 ce_info->num_multiple_test_blocks = blocks;
4065 ce_info->num_multiple_test_insns = total_insns;
4066
4067 if (ce_info->and_and_p)
4068 ce_info->num_and_and_blocks = blocks;
4069 else
4070 ce_info->num_or_or_blocks = blocks;
4071 }
4072 }
4073
4074 /* The THEN block of an IF-THEN combo must have exactly one predecessor,
4075 other than any || blocks which jump to the THEN block. */
4076 if ((EDGE_COUNT (then_bb->preds) - ce_info->num_or_or_blocks) != 1)
4077 return FALSE;
4078
4079 /* The incoming edges of the THEN and ELSE blocks must not be complex. */
4080 FOR_EACH_EDGE (cur_edge, ei, then_bb->preds)
4081 {
4082 if (cur_edge->flags & EDGE_COMPLEX)
4083 return FALSE;
4084 }
4085
4086 FOR_EACH_EDGE (cur_edge, ei, else_bb->preds)
4087 {
4088 if (cur_edge->flags & EDGE_COMPLEX)
4089 return FALSE;
4090 }
4091
4092 /* The THEN block of an IF-THEN combo must have zero or one successors. */
4093 if (EDGE_COUNT (then_bb->succs) > 0
4094 && (!single_succ_p (then_bb)
4095 || (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
4096 || (epilogue_completed
4097 && tablejump_p (BB_END (then_bb), NULL, NULL))))
4098 return FALSE;
4099
4100 /* If the THEN block has no successors, conditional execution can still
4101 make a conditional call. Don't do this unless the ELSE block has
4102 only one incoming edge -- the CFG manipulation is too ugly otherwise.
4103 Check for the last insn of the THEN block being an indirect jump, which
4104 is listed as not having any successors, but confuses the rest of the CE
4105 code processing. ??? We should fix this in the future. */
4106 if (EDGE_COUNT (then_bb->succs) == 0)
4107 {
4108 if (single_pred_p (else_bb) && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4109 {
4110 rtx_insn *last_insn = BB_END (then_bb);
4111
4112 while (last_insn
4113 && NOTE_P (last_insn)
4114 && last_insn != BB_HEAD (then_bb))
4115 last_insn = PREV_INSN (last_insn);
4116
4117 if (last_insn
4118 && JUMP_P (last_insn)
4119 && ! simplejump_p (last_insn))
4120 return FALSE;
4121
4122 join_bb = else_bb;
4123 else_bb = NULL_BLOCK;
4124 }
4125 else
4126 return FALSE;
4127 }
4128
4129 /* If the THEN block's successor is the other edge out of the TEST block,
4130 then we have an IF-THEN combo without an ELSE. */
4131 else if (single_succ (then_bb) == else_bb)
4132 {
4133 join_bb = else_bb;
4134 else_bb = NULL_BLOCK;
4135 }
4136
4137 /* If the THEN and ELSE blocks meet in a subsequent block, and the ELSE
4138 has exactly one predecessor and one successor, and the outgoing edge
4139 is not complex, then we have an IF-THEN-ELSE combo. */
4140 else if (single_succ_p (else_bb)
4141 && single_succ (then_bb) == single_succ (else_bb)
4142 && single_pred_p (else_bb)
4143 && !(single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
4144 && !(epilogue_completed
4145 && tablejump_p (BB_END (else_bb), NULL, NULL)))
4146 join_bb = single_succ (else_bb);
4147
4148 /* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */
4149 else
4150 return FALSE;
4151
4152 num_possible_if_blocks++;
4153
4154 if (dump_file)
4155 {
4156 fprintf (dump_file,
4157 "\nIF-THEN%s block found, pass %d, start block %d "
4158 "[insn %d], then %d [%d]",
4159 (else_bb) ? "-ELSE" : "",
4160 ce_info->pass,
4161 test_bb->index,
4162 BB_HEAD (test_bb) ? (int)INSN_UID (BB_HEAD (test_bb)) : -1,
4163 then_bb->index,
4164 BB_HEAD (then_bb) ? (int)INSN_UID (BB_HEAD (then_bb)) : -1);
4165
4166 if (else_bb)
4167 fprintf (dump_file, ", else %d [%d]",
4168 else_bb->index,
4169 BB_HEAD (else_bb) ? (int)INSN_UID (BB_HEAD (else_bb)) : -1);
4170
4171 fprintf (dump_file, ", join %d [%d]",
4172 join_bb->index,
4173 BB_HEAD (join_bb) ? (int)INSN_UID (BB_HEAD (join_bb)) : -1);
4174
4175 if (ce_info->num_multiple_test_blocks > 0)
4176 fprintf (dump_file, ", %d %s block%s last test %d [%d]",
4177 ce_info->num_multiple_test_blocks,
4178 (ce_info->and_and_p) ? "&&" : "||",
4179 (ce_info->num_multiple_test_blocks == 1) ? "" : "s",
4180 ce_info->last_test_bb->index,
4181 ((BB_HEAD (ce_info->last_test_bb))
4182 ? (int)INSN_UID (BB_HEAD (ce_info->last_test_bb))
4183 : -1));
4184
4185 fputc ('\n', dump_file);
4186 }
4187
4188 /* Make sure the IF, THEN, and ELSE blocks are adjacent. Actually, we get the
4189 first condition for free, since we've already asserted that there's a
4190 fallthru edge from IF to THEN. Likewise for the && and || blocks, since
4191 we checked the FALLTHRU flag, those are already adjacent to the last IF
4192 block. */
4193 /* ??? As an enhancement, move the ELSE block. Have to deal with
4194 BLOCK notes, if by no other means than backing out the merge if they
4195 exist. Sticky enough I don't want to think about it now. */
4196 next = then_bb;
4197 if (else_bb && (next = next->next_bb) != else_bb)
4198 return FALSE;
4199 if ((next = next->next_bb) != join_bb
4200 && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4201 {
4202 if (else_bb)
4203 join_bb = NULL;
4204 else
4205 return FALSE;
4206 }
4207
4208 /* Do the real work. */
4209
4210 ce_info->else_bb = else_bb;
4211 ce_info->join_bb = join_bb;
4212
4213 /* If we have && and || tests, first try to combine the && and || tests
4214 into the conditional code, and if that fails, go back and handle the
4215 block without the && and ||, which at present handles the && case only
4216 if there was no ELSE block. */
4217 if (cond_exec_process_if_block (ce_info, TRUE))
4218 return TRUE;
4219
4220 if (ce_info->num_multiple_test_blocks)
4221 {
4222 cancel_changes (0);
4223
4224 if (cond_exec_process_if_block (ce_info, FALSE))
4225 return TRUE;
4226 }
4227
4228 return FALSE;
4229 }
4230
4231 /* Convert a branch over a trap, or a branch
4232 to a trap, into a conditional trap. */
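/* An illustrative sketch (not from this file):

       if (p != 0) goto ok;
       trap;
     ok:

   becomes the single conditional trap "trap if (p == 0)"; the comparison
   is reversed because here the THEN (fallthru) block holds the trap.  */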
4233
4234 static int
4235 find_cond_trap (basic_block test_bb, edge then_edge, edge else_edge)
4236 {
4237 basic_block then_bb = then_edge->dest;
4238 basic_block else_bb = else_edge->dest;
4239 basic_block other_bb, trap_bb;
4240 rtx_insn *trap, *jump;
4241 rtx cond;
4242 rtx_insn *cond_earliest;
4243 enum rtx_code code;
4244
4245 /* Locate the block with the trap instruction. */
4246 /* ??? While we look for no successors, we really ought to allow
4247 EH successors. Need to fix merge_if_block for that to work. */
4248 if ((trap = block_has_only_trap (then_bb)) != NULL)
4249 trap_bb = then_bb, other_bb = else_bb;
4250 else if ((trap = block_has_only_trap (else_bb)) != NULL)
4251 trap_bb = else_bb, other_bb = then_bb;
4252 else
4253 return FALSE;
4254
4255 if (dump_file)
4256 {
4257 fprintf (dump_file, "\nTRAP-IF block found, start %d, trap %d\n",
4258 test_bb->index, trap_bb->index);
4259 }
4260
4261 /* If this is not a standard conditional jump, we can't parse it. */
4262 jump = BB_END (test_bb);
4263 cond = noce_get_condition (jump, &cond_earliest, false);
4264 if (! cond)
4265 return FALSE;
4266
4267 /* If the conditional jump is more than just a conditional jump, then
4268 we cannot do if-conversion on this block. */
4269 if (! onlyjump_p (jump))
4270 return FALSE;
4271
4272 /* We must be comparing objects whose modes imply the size. */
4273 if (GET_MODE (XEXP (cond, 0)) == BLKmode)
4274 return FALSE;
4275
4276 /* Reverse the comparison code, if necessary. */
4277 code = GET_CODE (cond);
4278 if (then_bb == trap_bb)
4279 {
4280 code = reversed_comparison_code (cond, jump);
4281 if (code == UNKNOWN)
4282 return FALSE;
4283 }
4284
4285 /* Attempt to generate the conditional trap. */
4286 rtx_insn *seq = gen_cond_trap (code, copy_rtx (XEXP (cond, 0)),
4287 copy_rtx (XEXP (cond, 1)),
4288 TRAP_CODE (PATTERN (trap)));
4289 if (seq == NULL)
4290 return FALSE;
4291
4292 /* Emit the new insns before cond_earliest. */
4293 emit_insn_before_setloc (seq, cond_earliest, INSN_LOCATION (trap));
4294
4295 /* Delete the trap block if possible. */
4296 remove_edge (trap_bb == then_bb ? then_edge : else_edge);
4297 df_set_bb_dirty (test_bb);
4298 df_set_bb_dirty (then_bb);
4299 df_set_bb_dirty (else_bb);
4300
4301 if (EDGE_COUNT (trap_bb->preds) == 0)
4302 {
4303 delete_basic_block (trap_bb);
4304 num_true_changes++;
4305 }
4306
4307 /* Wire together the blocks again. */
4308 if (current_ir_type () == IR_RTL_CFGLAYOUT)
4309 single_succ_edge (test_bb)->flags |= EDGE_FALLTHRU;
4310 else if (trap_bb == then_bb)
4311 {
4312 rtx lab = JUMP_LABEL (jump);
4313 rtx_insn *seq = targetm.gen_jump (lab);
4314 rtx_jump_insn *newjump = emit_jump_insn_after (seq, jump);
4315 LABEL_NUSES (lab) += 1;
4316 JUMP_LABEL (newjump) = lab;
4317 emit_barrier_after (newjump);
4318 }
4319 delete_insn (jump);
4320
4321 if (can_merge_blocks_p (test_bb, other_bb))
4322 {
4323 merge_blocks (test_bb, other_bb);
4324 num_true_changes++;
4325 }
4326
4327 num_updated_if_blocks++;
4328 return TRUE;
4329 }
4330
4331 /* Subroutine of find_cond_trap: if BB contains only a trap insn,
4332 return it. */
4333
4334 static rtx_insn *
4335 block_has_only_trap (basic_block bb)
4336 {
4337 rtx_insn *trap;
4338
4339 /* We're not the exit block. */
4340 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4341 return NULL;
4342
4343 /* The block must have no successors. */
4344 if (EDGE_COUNT (bb->succs) > 0)
4345 return NULL;
4346
4347 /* The only active instruction in the block must be the trap. */
4348 trap = first_active_insn (bb);
4349 if (! (trap == BB_END (bb)
4350 && GET_CODE (PATTERN (trap)) == TRAP_IF
4351 && TRAP_CONDITION (PATTERN (trap)) == const_true_rtx))
4352 return NULL;
4353
4354 return trap;
4355 }
4356
4357 /* Look for IF-THEN-ELSE cases in which one of THEN or ELSE is
4358 transformable, but not necessarily the other. There need be no
4359 JOIN block.
4360
4361 Return TRUE if we were successful at converting the block.
4362
4363 Cases we'd like to look at:
4364
4365 (1)
4366 if (test) goto over; // x not live
4367 x = a;
4368 goto label;
4369 over:
4370
4371 becomes
4372
4373 x = a;
4374 if (! test) goto label;
4375
4376 (2)
4377 if (test) goto E; // x not live
4378 x = big();
4379 goto L;
4380 E:
4381 x = b;
4382 goto M;
4383
4384 becomes
4385
4386 x = b;
4387 if (test) goto M;
4388 x = big();
4389 goto L;
4390
4391 (3) // This one's really only interesting for targets that can do
4392 // multiway branching, e.g. IA-64 BBB bundles. For other targets
4393 // it results in multiple branches on a cache line, which often
4394 // does not sit well with predictors.
4395
4396 if (test1) goto E; // predicted not taken
4397 x = a;
4398 if (test2) goto F;
4399 ...
4400 E:
4401 x = b;
4402 J:
4403
4404 becomes
4405
4406 x = a;
4407 if (test1) goto E;
4408 if (test2) goto F;
4409
4410 Notes:
4411
4412 (A) Don't do (2) if the branch is predicted against the block we're
4413 eliminating. Do it anyway if we can eliminate a branch; this requires
4414 that the sole successor of the eliminated block postdominate the other
4415 side of the if.
4416
4417 (B) With CE, on (3) we can steal from both sides of the if, creating
4418
4419 if (test1) x = a;
4420 if (!test1) x = b;
4421 if (test1) goto J;
4422 if (test2) goto F;
4423 ...
4424 J:
4425
4426 Again, this is most useful if J postdominates.
4427
4428 (C) CE substitutes for helpful life information.
4429
4430 (D) These heuristics need a lot of work. */
4431
4432 /* Test for case 1 above. */
4433
4434 static int
4435 find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
4436 {
4437 basic_block then_bb = then_edge->dest;
4438 basic_block else_bb = else_edge->dest;
4439 basic_block new_bb;
4440 int then_bb_index, then_prob;
4441 rtx else_target = NULL_RTX;
4442
4443 /* If we are partitioning hot/cold basic blocks, we don't want to
4444 mess up unconditional or indirect jumps that cross between hot
4445 and cold sections.
4446
4447 Basic block partitioning may result in some jumps that appear to
4448 be optimizable (or blocks that appear to be mergeable), but which really
4449 must be left untouched (they are required to make it safely across
4450 partition boundaries). See the comments at the top of
4451 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4452
4453 if ((BB_END (then_bb)
4454 && JUMP_P (BB_END (then_bb))
4455 && CROSSING_JUMP_P (BB_END (then_bb)))
4456 || (BB_END (test_bb)
4457 && JUMP_P (BB_END (test_bb))
4458 && CROSSING_JUMP_P (BB_END (test_bb)))
4459 || (BB_END (else_bb)
4460 && JUMP_P (BB_END (else_bb))
4461 && CROSSING_JUMP_P (BB_END (else_bb))))
4462 return FALSE;
4463
4464 /* THEN has one successor. */
4465 if (!single_succ_p (then_bb))
4466 return FALSE;
4467
4468 /* THEN does not fall through, but is not strange either. */
4469 if (single_succ_edge (then_bb)->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))
4470 return FALSE;
4471
4472 /* THEN has one predecessor. */
4473 if (!single_pred_p (then_bb))
4474 return FALSE;
4475
4476 /* THEN must do something. */
4477 if (forwarder_block_p (then_bb))
4478 return FALSE;
4479
4480 num_possible_if_blocks++;
4481 if (dump_file)
4482 fprintf (dump_file,
4483 "\nIF-CASE-1 found, start %d, then %d\n",
4484 test_bb->index, then_bb->index);
4485
4486 if (then_edge->probability)
4487 then_prob = REG_BR_PROB_BASE - then_edge->probability;
4488 else
4489 then_prob = REG_BR_PROB_BASE / 2;
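/* Illustrative numbers: REG_BR_PROB_BASE is 10000, so a 90% chance of
   entering THEN (then_edge->probability == 9000) leaves then_prob ==
   1000, the scaled probability that THEN is skipped; it is used to
   weight the speculation-cost check below. */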
4490
4491 /* We're speculating from the THEN path, so we want to make sure the cost
4492 of speculation is within reason. */
4493 if (! cheap_bb_rtx_cost_p (then_bb, then_prob,
4494 COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (then_edge->src),
4495 predictable_edge_p (then_edge)))))
4496 return FALSE;
4497
4498 if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4499 {
4500 rtx_insn *jump = BB_END (else_edge->src);
4501 gcc_assert (JUMP_P (jump));
4502 else_target = JUMP_LABEL (jump);
4503 }
4504
4505 /* Registers set are dead, or are predicable. */
4506 if (! dead_or_predicable (test_bb, then_bb, else_bb,
4507 single_succ_edge (then_bb), 1))
4508 return FALSE;
4509
4510 /* Conversion went ok, including moving the insns and fixing up the
4511 jump. Adjust the CFG to match. */
4512
4513 /* We can avoid creating a new basic block if then_bb is immediately
4514 followed by else_bb, i.e. deleting then_bb allows test_bb to fall
4515 through to else_bb. */
4516
4517 if (then_bb->next_bb == else_bb
4518 && then_bb->prev_bb == test_bb
4519 && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4520 {
4521 redirect_edge_succ (FALLTHRU_EDGE (test_bb), else_bb);
4522 new_bb = 0;
4523 }
4524 else if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4525 new_bb = force_nonfallthru_and_redirect (FALLTHRU_EDGE (test_bb),
4526 else_bb, else_target);
4527 else
4528 new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb),
4529 else_bb);
4530
4531 df_set_bb_dirty (test_bb);
4532 df_set_bb_dirty (else_bb);
4533
4534 then_bb_index = then_bb->index;
4535 delete_basic_block (then_bb);
4536
4537 /* Make the rest of the code believe that the newly created block is
4538 the THEN_BB block we removed. */
4539 if (new_bb)
4540 {
4541 df_bb_replace (then_bb_index, new_bb);
4542 /* This should have been done above via force_nonfallthru_and_redirect
4543 (possibly called from redirect_edge_and_branch_force). */
4544 gcc_checking_assert (BB_PARTITION (new_bb) == BB_PARTITION (test_bb));
4545 }
4546
4547 num_true_changes++;
4548 num_updated_if_blocks++;
4549
4550 return TRUE;
4551 }
4552
4553 /* Test for case 2 above. */
4554
4555 static int
4556 find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge)
4557 {
4558 basic_block then_bb = then_edge->dest;
4559 basic_block else_bb = else_edge->dest;
4560 edge else_succ;
4561 int then_prob, else_prob;
4562
4563 /* We do not want to speculate (empty) loop latches. */
4564 if (current_loops
4565 && else_bb->loop_father->latch == else_bb)
4566 return FALSE;
4567
4568 /* If we are partitioning hot/cold basic blocks, we don't want to
4569 mess up unconditional or indirect jumps that cross between hot
4570 and cold sections.
4571
4572 Basic block partitioning may result in some jumps that appear to
4573 be optimizable (or blocks that appear to be mergeable), but which really
4574 must be left untouched (they are required to make it safely across
4575 partition boundaries). See the comments at the top of
4576 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4577
4578 if ((BB_END (then_bb)
4579 && JUMP_P (BB_END (then_bb))
4580 && CROSSING_JUMP_P (BB_END (then_bb)))
4581 || (BB_END (test_bb)
4582 && JUMP_P (BB_END (test_bb))
4583 && CROSSING_JUMP_P (BB_END (test_bb)))
4584 || (BB_END (else_bb)
4585 && JUMP_P (BB_END (else_bb))
4586 && CROSSING_JUMP_P (BB_END (else_bb))))
4587 return FALSE;
4588
4589 /* ELSE has one successor. */
4590 if (!single_succ_p (else_bb))
4591 return FALSE;
4592 else
4593 else_succ = single_succ_edge (else_bb);
4594
4595 /* ELSE outgoing edge is not complex. */
4596 if (else_succ->flags & EDGE_COMPLEX)
4597 return FALSE;
4598
4599 /* ELSE has one predecessor. */
4600 if (!single_pred_p (else_bb))
4601 return FALSE;
4602
4603 /* THEN is not EXIT. */
4604 if (then_bb->index < NUM_FIXED_BLOCKS)
4605 return FALSE;
4606
4607 if (else_edge->probability)
4608 {
4609 else_prob = else_edge->probability;
4610 then_prob = REG_BR_PROB_BASE - else_prob;
4611 }
4612 else
4613 {
4614 else_prob = REG_BR_PROB_BASE / 2;
4615 then_prob = REG_BR_PROB_BASE / 2;
4616 }
4617
4618 /* ELSE is predicted or SUCC(ELSE) postdominates THEN. */
4619 if (else_prob > then_prob)
4620 ;
4621 else if (else_succ->dest->index < NUM_FIXED_BLOCKS
4622 || dominated_by_p (CDI_POST_DOMINATORS, then_bb,
4623 else_succ->dest))
4624 ;
4625 else
4626 return FALSE;
4627
4628 num_possible_if_blocks++;
4629 if (dump_file)
4630 fprintf (dump_file,
4631 "\nIF-CASE-2 found, start %d, else %d\n",
4632 test_bb->index, else_bb->index);
4633
4634 /* We're speculating from the ELSE path, so we want to make sure the cost
4635 of speculation is within reason. */
4636 if (! cheap_bb_rtx_cost_p (else_bb, else_prob,
4637 COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (else_edge->src),
4638 predictable_edge_p (else_edge)))))
4639 return FALSE;
4640
4641 /* Registers set are dead, or are predicable. */
4642 if (! dead_or_predicable (test_bb, else_bb, then_bb, else_succ, 0))
4643 return FALSE;
4644
4645 /* Conversion went ok, including moving the insns and fixing up the
4646 jump. Adjust the CFG to match. */
4647
4648 df_set_bb_dirty (test_bb);
4649 df_set_bb_dirty (then_bb);
4650 delete_basic_block (else_bb);
4651
4652 num_true_changes++;
4653 num_updated_if_blocks++;
4654
4655 /* ??? We may now fallthru from one of THEN's successors into a join
4656 block. Rerun cleanup_cfg? Examine things manually? Wait? */
4657
4658 return TRUE;
4659 }
4660
4661 /* Used by the code above to perform the actual rtl transformations.
4662 Return TRUE if successful.
4663
4664 TEST_BB is the block containing the conditional branch. MERGE_BB is the
4665 block containing the code to manipulate; OTHER_BB is the remaining side
4666 of the if, whose live-in registers the moved code must not clobber.
4667 DEST_EDGE is an edge representing a jump to the join block; after the
4668 conversion, TEST_BB should branch to its destination. REVERSEP is true if the sense of the branch should be reversed. */
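/* An illustrative sketch (not from this file), matching case (1) above
   with X dead on the other path:

       test_bb:   if (test) goto over;
       merge_bb:  x = a;  goto label;
       over:

   becomes

       x = a;  if (! test) goto label;

   with MERGE_BB's body reordered to just before the (possibly
   inverted) branch in TEST_BB.  */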
4669
4670 static int
4671 dead_or_predicable (basic_block test_bb, basic_block merge_bb,
4672 basic_block other_bb, edge dest_edge, int reversep)
4673 {
4674 basic_block new_dest = dest_edge->dest;
4675 rtx_insn *head, *end, *jump;
4676 rtx_insn *earliest = NULL;
4677 rtx old_dest;
4678 bitmap merge_set = NULL;
4679 /* Number of pending changes. */
4680 int n_validated_changes = 0;
4681 rtx new_dest_label = NULL_RTX;
4682
4683 jump = BB_END (test_bb);
4684
4685 /* Find the extent of the real code in the merge block. */
4686 head = BB_HEAD (merge_bb);
4687 end = BB_END (merge_bb);
4688
4689 while (DEBUG_INSN_P (end) && end != head)
4690 end = PREV_INSN (end);
4691
4692 /* If merge_bb ends with a tablejump, predicating/moving insns
4693 into test_bb and then deleting merge_bb will result in the jumptable
4694 that follows merge_bb being removed along with merge_bb, leaving an
4695 unresolved reference to the jumptable. */
4696 if (tablejump_p (end, NULL, NULL))
4697 return FALSE;
4698
4699 if (LABEL_P (head))
4700 head = NEXT_INSN (head);
4701 while (DEBUG_INSN_P (head) && head != end)
4702 head = NEXT_INSN (head);
4703 if (NOTE_P (head))
4704 {
4705 if (head == end)
4706 {
4707 head = end = NULL;
4708 goto no_body;
4709 }
4710 head = NEXT_INSN (head);
4711 while (DEBUG_INSN_P (head) && head != end)
4712 head = NEXT_INSN (head);
4713 }
4714
4715 if (JUMP_P (end))
4716 {
4717 if (!onlyjump_p (end))
4718 return FALSE;
4719 if (head == end)
4720 {
4721 head = end = NULL;
4722 goto no_body;
4723 }
4724 end = PREV_INSN (end);
4725 while (DEBUG_INSN_P (end) && end != head)
4726 end = PREV_INSN (end);
4727 }
4728
4729 /* Don't move frame-related insns across the conditional branch. This
4730 can lead to one of the paths of the branch having wrong unwind info. */
4731 if (epilogue_completed)
4732 {
4733 rtx_insn *insn = head;
4734 while (1)
4735 {
4736 if (INSN_P (insn) && RTX_FRAME_RELATED_P (insn))
4737 return FALSE;
4738 if (insn == end)
4739 break;
4740 insn = NEXT_INSN (insn);
4741 }
4742 }
4743
4744 /* Disable handling dead code by conditional execution if the machine needs
4745 to do anything funny with the tests, etc. */
4746 #ifndef IFCVT_MODIFY_TESTS
4747 if (targetm.have_conditional_execution ())
4748 {
4749 /* In the conditional execution case, we have things easy. We know
4750 the condition is reversible. We don't have to check life info
4751 because we're going to conditionally execute the code anyway.
4752 All that's left is making sure the insns involved can actually
4753 be predicated. */
4754
4755 rtx cond;
4756
4757 cond = cond_exec_get_condition (jump);
4758 if (! cond)
4759 return FALSE;
4760
4761 rtx note = find_reg_note (jump, REG_BR_PROB, NULL_RTX);
4762 int prob_val = (note ? XINT (note, 0) : -1);
4763
4764 if (reversep)
4765 {
4766 enum rtx_code rev = reversed_comparison_code (cond, jump);
4767 if (rev == UNKNOWN)
4768 return FALSE;
4769 cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0),
4770 XEXP (cond, 1));
4771 if (prob_val >= 0)
4772 prob_val = REG_BR_PROB_BASE - prob_val;
4773 }
4774
4775 if (cond_exec_process_insns (NULL, head, end, cond, prob_val, 0)
4776 && verify_changes (0))
4777 n_validated_changes = num_validated_changes ();
4778 else
4779 cancel_changes (0);
4780
4781 earliest = jump;
4782 }
4783 #endif
4784
4785 /* If we allocated new pseudos (e.g. in the conditional move
4786 expander called from noce_emit_cmove), we must resize the
4787 array first. */
4788 if (max_regno < max_reg_num ())
4789 max_regno = max_reg_num ();
4790
4791 /* Try the NCE path if the CE path did not result in any changes. */
4792 if (n_validated_changes == 0)
4793 {
4794 rtx cond;
4795 rtx_insn *insn;
4796 regset live;
4797 bool success;
4798
4799 /* In the non-conditional execution case, we have to verify that there
4800 are no trapping operations, no calls, no references to memory, and
4801 that any registers modified are dead at the branch site. */
4802
4803 if (!any_condjump_p (jump))
4804 return FALSE;
4805
4806 /* Find the extent of the conditional. */
4807 cond = noce_get_condition (jump, &earliest, false);
4808 if (!cond)
4809 return FALSE;
4810
4811 live = BITMAP_ALLOC (&reg_obstack);
4812 simulate_backwards_to_point (merge_bb, live, end);
4813 success = can_move_insns_across (head, end, earliest, jump,
4814 merge_bb, live,
4815 df_get_live_in (other_bb), NULL);
4816 BITMAP_FREE (live);
4817 if (!success)
4818 return FALSE;
4819
4820 /* Collect the set of registers set in MERGE_BB. */
4821 merge_set = BITMAP_ALLOC (&reg_obstack);
4822
4823 FOR_BB_INSNS (merge_bb, insn)
4824 if (NONDEBUG_INSN_P (insn))
4825 df_simulate_find_defs (insn, merge_set);
4826
4827 /* If shrink-wrapping, disable this optimization when test_bb is
4828 the first basic block and merge_bb exits. The idea is to not
4829 move code setting up a return register as that may clobber a
4830 register used to pass function parameters, which then must be
4831 saved in call-saved regs. A call-saved reg requires the
4832 prologue, killing a shrink-wrap opportunity. */
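/* Illustrative, target-dependent example: if MERGE_BB sets the return
   register, say r0 on a machine where r0 also passes the first
   argument, hoisting that set above the branch could force the
   incoming argument into a call-saved register, whose save in turn
   requires the prologue everywhere. */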
4833 if ((SHRINK_WRAPPING_ENABLED && !epilogue_completed)
4834 && ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == test_bb
4835 && single_succ_p (new_dest)
4836 && single_succ (new_dest) == EXIT_BLOCK_PTR_FOR_FN (cfun)
4837 && bitmap_intersect_p (df_get_live_in (new_dest), merge_set))
4838 {
4839 regset return_regs;
4840 unsigned int i;
4841
4842 return_regs = BITMAP_ALLOC (&reg_obstack);
4843
4844 /* Start off with the intersection of regs used to pass
4845 params and regs used to return values. */
4846 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4847 if (FUNCTION_ARG_REGNO_P (i)
4848 && targetm.calls.function_value_regno_p (i))
4849 bitmap_set_bit (return_regs, INCOMING_REGNO (i));
4850
4851 bitmap_and_into (return_regs,
4852 df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4853 bitmap_and_into (return_regs,
4854 df_get_live_in (EXIT_BLOCK_PTR_FOR_FN (cfun)));
4855 if (!bitmap_empty_p (return_regs))
4856 {
4857 FOR_BB_INSNS_REVERSE (new_dest, insn)
4858 if (NONDEBUG_INSN_P (insn))
4859 {
4860 df_ref def;
4861
4862 /* If this insn sets any reg in return_regs, add all
4863 reg uses to the set of regs we're interested in. */
4864 FOR_EACH_INSN_DEF (def, insn)
4865 if (bitmap_bit_p (return_regs, DF_REF_REGNO (def)))
4866 {
4867 df_simulate_uses (insn, return_regs);
4868 break;
4869 }
4870 }
4871 if (bitmap_intersect_p (merge_set, return_regs))
4872 {
4873 BITMAP_FREE (return_regs);
4874 BITMAP_FREE (merge_set);
4875 return FALSE;
4876 }
4877 }
4878 BITMAP_FREE (return_regs);
4879 }
4880 }
4881
4882 no_body:
4883 /* We don't want to use normal invert_jump or redirect_jump because
4884 we don't want delete_insn called. Also, we want to do our own
4885 change group management. */
4886
4887 old_dest = JUMP_LABEL (jump);
4888 if (other_bb != new_dest)
4889 {
4890 if (!any_condjump_p (jump))
4891 goto cancel;
4892
4893 if (JUMP_P (BB_END (dest_edge->src)))
4894 new_dest_label = JUMP_LABEL (BB_END (dest_edge->src));
4895 else if (new_dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4896 new_dest_label = ret_rtx;
4897 else
4898 new_dest_label = block_label (new_dest);
4899
4900 rtx_jump_insn *jump_insn = as_a <rtx_jump_insn *> (jump);
4901 if (reversep
4902 ? ! invert_jump_1 (jump_insn, new_dest_label)
4903 : ! redirect_jump_1 (jump_insn, new_dest_label))
4904 goto cancel;
4905 }
4906
4907 if (verify_changes (n_validated_changes))
4908 confirm_change_group ();
4909 else
4910 goto cancel;
4911
4912 if (other_bb != new_dest)
4913 {
4914 redirect_jump_2 (as_a <rtx_jump_insn *> (jump), old_dest, new_dest_label,
4915 0, reversep);
4916
4917 redirect_edge_succ (BRANCH_EDGE (test_bb), new_dest);
4918 if (reversep)
4919 {
4920 std::swap (BRANCH_EDGE (test_bb)->count,
4921 FALLTHRU_EDGE (test_bb)->count);
4922 std::swap (BRANCH_EDGE (test_bb)->probability,
4923 FALLTHRU_EDGE (test_bb)->probability);
4924 update_br_prob_note (test_bb);
4925 }
4926 }
4927
4928 /* Move the insns out of MERGE_BB to before the branch. */
4929 if (head != NULL)
4930 {
4931 rtx_insn *insn;
4932
4933 if (end == BB_END (merge_bb))
4934 BB_END (merge_bb) = PREV_INSN (head);
4935
4936 /* PR 21767: when moving insns above a conditional branch, the REG_EQUAL
4937 notes being moved might become invalid. */
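/* Illustrative example: an insn "r5 = r6" might carry a note
   REG_EQUAL (mem (r7)) that held only under the branch condition
   guarding MERGE_BB; once the insn is hoisted above the branch the
   note could mislead later passes, so such notes are dropped. */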
4938 insn = head;
4939 do
4940 {
4941 rtx note;
4942
4943 if (! INSN_P (insn))
4944 continue;
4945 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
4946 if (! note)
4947 continue;
4948 remove_note (insn, note);
4949 } while (insn != end && (insn = NEXT_INSN (insn)));
4950
4951 /* PR46315: when moving insns above a conditional branch, the REG_EQUAL
4952 notes referring to the registers being set might become invalid. */
4953 if (merge_set)
4954 {
4955 unsigned i;
4956 bitmap_iterator bi;
4957
4958 EXECUTE_IF_SET_IN_BITMAP (merge_set, 0, i, bi)
4959 remove_reg_equal_equiv_notes_for_regno (i);
4960
4961 BITMAP_FREE (merge_set);
4962 }
4963
4964 reorder_insns (head, end, PREV_INSN (earliest));
4965 }
4966
4967 /* Remove the jump and edge if we can. */
4968 if (other_bb == new_dest)
4969 {
4970 delete_insn (jump);
4971 remove_edge (BRANCH_EDGE (test_bb));
4972 /* ??? Can't merge blocks here, as then_bb is still in use.
4973 At minimum, the merge will get done just before bb-reorder. */
4974 }
4975
4976 return TRUE;
4977
4978 cancel:
4979 cancel_changes (0);
4980
4981 if (merge_set)
4982 BITMAP_FREE (merge_set);
4983
4984 return FALSE;
4985 }
4986 \f
4987 /* Main entry point for all if-conversion. AFTER_COMBINE is true if
4988 we are after the combine pass. */
4989
4990 static void
4991 if_convert (bool after_combine)
4992 {
4993 basic_block bb;
4994 int pass;
4995
4996 if (optimize == 1)
4997 {
4998 df_live_add_problem ();
4999 df_live_set_all_dirty ();
5000 }
5001
5002 /* Record whether we are after the combine pass. */
5003 ifcvt_after_combine = after_combine;
5004 have_cbranchcc4 = (direct_optab_handler (cbranch_optab, CCmode)
5005 != CODE_FOR_nothing);
5006 num_possible_if_blocks = 0;
5007 num_updated_if_blocks = 0;
5008 num_true_changes = 0;
5009
5010 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
5011 mark_loop_exit_edges ();
5012 loop_optimizer_finalize ();
5013 free_dominance_info (CDI_DOMINATORS);
5014
5015 /* Compute postdominators. */
5016 calculate_dominance_info (CDI_POST_DOMINATORS);
5017
5018 df_set_flags (DF_LR_RUN_DCE);
5019
5020 /* Go through each of the basic blocks looking for things to convert. If we
5021 have conditional execution, we make multiple passes to allow us to handle
5022 IF-THEN{-ELSE} blocks within other IF-THEN{-ELSE} blocks. */
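/* E.g. (illustrative) for "if (a) { if (b) x = 1; }" on a conditional
   execution target, pass 1 may convert the inner if into a predicated
   move, leaving a simple IF-THEN that pass 2 can then convert. */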
5023 pass = 0;
5024 do
5025 {
5026 df_analyze ();
5027 /* Only need to do DCE on the first pass. */
5028 df_clear_flags (DF_LR_RUN_DCE);
5029 cond_exec_changed_p = FALSE;
5030 pass++;
5031
5032 #ifdef IFCVT_MULTIPLE_DUMPS
5033 if (dump_file && pass > 1)
5034 fprintf (dump_file, "\n\n========== Pass %d ==========\n", pass);
5035 #endif
5036
5037 FOR_EACH_BB_FN (bb, cfun)
5038 {
5039 basic_block new_bb;
5040 while (!df_get_bb_dirty (bb)
5041 && (new_bb = find_if_header (bb, pass)) != NULL)
5042 bb = new_bb;
5043 }
5044
5045 #ifdef IFCVT_MULTIPLE_DUMPS
5046 if (dump_file && cond_exec_changed_p)
5047 print_rtl_with_bb (dump_file, get_insns (), dump_flags);
5048 #endif
5049 }
5050 while (cond_exec_changed_p);
5051
5052 #ifdef IFCVT_MULTIPLE_DUMPS
5053 if (dump_file)
5054 fprintf (dump_file, "\n\n========== no more changes\n");
5055 #endif
5056
5057 free_dominance_info (CDI_POST_DOMINATORS);
5058
5059 if (dump_file)
5060 fflush (dump_file);
5061
5062 clear_aux_for_blocks ();
5063
5064 /* If we allocated new pseudos, we must resize the array for sched1. */
5065 if (max_regno < max_reg_num ())
5066 max_regno = max_reg_num ();
5067
5068 /* Write the final stats. */
5069 if (dump_file && num_possible_if_blocks > 0)
5070 {
5071 fprintf (dump_file,
5072 "\n%d possible IF blocks searched.\n",
5073 num_possible_if_blocks);
5074 fprintf (dump_file,
5075 "%d IF blocks converted.\n",
5076 num_updated_if_blocks);
5077 fprintf (dump_file,
5078 "%d true changes made.\n\n\n",
5079 num_true_changes);
5080 }
5081
5082 if (optimize == 1)
5083 df_remove_problem (df_live);
5084
5085 checking_verify_flow_info ();
5086 }
5087 \f
5088 /* If-conversion and CFG cleanup. */
5089 static unsigned int
5090 rest_of_handle_if_conversion (void)
5091 {
5092 if (flag_if_conversion)
5093 {
5094 if (dump_file)
5095 {
5096 dump_reg_info (dump_file);
5097 dump_flow_info (dump_file, dump_flags);
5098 }
5099 cleanup_cfg (CLEANUP_EXPENSIVE);
5100 if_convert (false);
5101 }
5102
5103 cleanup_cfg (0);
5104 return 0;
5105 }
5106
5107 namespace {
5108
5109 const pass_data pass_data_rtl_ifcvt =
5110 {
5111 RTL_PASS, /* type */
5112 "ce1", /* name */
5113 OPTGROUP_NONE, /* optinfo_flags */
5114 TV_IFCVT, /* tv_id */
5115 0, /* properties_required */
5116 0, /* properties_provided */
5117 0, /* properties_destroyed */
5118 0, /* todo_flags_start */
5119 TODO_df_finish, /* todo_flags_finish */
5120 };
5121
5122 class pass_rtl_ifcvt : public rtl_opt_pass
5123 {
5124 public:
5125 pass_rtl_ifcvt (gcc::context *ctxt)
5126 : rtl_opt_pass (pass_data_rtl_ifcvt, ctxt)
5127 {}
5128
5129 /* opt_pass methods: */
5130 virtual bool gate (function *)
5131 {
5132 return (optimize > 0) && dbg_cnt (if_conversion);
5133 }
5134
5135 virtual unsigned int execute (function *)
5136 {
5137 return rest_of_handle_if_conversion ();
5138 }
5139
5140 }; // class pass_rtl_ifcvt
5141
5142 } // anon namespace
5143
5144 rtl_opt_pass *
5145 make_pass_rtl_ifcvt (gcc::context *ctxt)
5146 {
5147 return new pass_rtl_ifcvt (ctxt);
5148 }
5149
5150
5151 /* Rerun if-conversion, as combine may have simplified things enough
5152 to now meet sequence length restrictions. */
5153
5154 namespace {
5155
5156 const pass_data pass_data_if_after_combine =
5157 {
5158 RTL_PASS, /* type */
5159 "ce2", /* name */
5160 OPTGROUP_NONE, /* optinfo_flags */
5161 TV_IFCVT, /* tv_id */
5162 0, /* properties_required */
5163 0, /* properties_provided */
5164 0, /* properties_destroyed */
5165 0, /* todo_flags_start */
5166 TODO_df_finish, /* todo_flags_finish */
5167 };
5168
5169 class pass_if_after_combine : public rtl_opt_pass
5170 {
5171 public:
5172 pass_if_after_combine (gcc::context *ctxt)
5173 : rtl_opt_pass (pass_data_if_after_combine, ctxt)
5174 {}
5175
5176 /* opt_pass methods: */
5177 virtual bool gate (function *)
5178 {
5179 return optimize > 0 && flag_if_conversion
5180 && dbg_cnt (if_after_combine);
5181 }
5182
5183 virtual unsigned int execute (function *)
5184 {
5185 if_convert (true);
5186 return 0;
5187 }
5188
5189 }; // class pass_if_after_combine
5190
5191 } // anon namespace
5192
5193 rtl_opt_pass *
5194 make_pass_if_after_combine (gcc::context *ctxt)
5195 {
5196 return new pass_if_after_combine (ctxt);
5197 }
5198
5199
5200 namespace {
5201
5202 const pass_data pass_data_if_after_reload =
5203 {
5204 RTL_PASS, /* type */
5205 "ce3", /* name */
5206 OPTGROUP_NONE, /* optinfo_flags */
5207 TV_IFCVT2, /* tv_id */
5208 0, /* properties_required */
5209 0, /* properties_provided */
5210 0, /* properties_destroyed */
5211 0, /* todo_flags_start */
5212 TODO_df_finish, /* todo_flags_finish */
5213 };
5214
5215 class pass_if_after_reload : public rtl_opt_pass
5216 {
5217 public:
5218 pass_if_after_reload (gcc::context *ctxt)
5219 : rtl_opt_pass (pass_data_if_after_reload, ctxt)
5220 {}
5221
5222 /* opt_pass methods: */
5223 virtual bool gate (function *)
5224 {
5225 return optimize > 0 && flag_if_conversion2
5226 && dbg_cnt (if_after_reload);
5227 }
5228
5229 virtual unsigned int execute (function *)
5230 {
5231 if_convert (true);
5232 return 0;
5233 }
5234
5235 }; // class pass_if_after_reload
5236
5237 } // anon namespace
5238
5239 rtl_opt_pass *
5240 make_pass_if_after_reload (gcc::context *ctxt)
5241 {
5242 return new pass_if_after_reload (ctxt);
5243 }