/* If-conversion support.
   Copyright (C) 2000-2014 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"

#include "rtl.h"
#include "regs.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "except.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "expr.h"
#include "output.h"
#include "insn-codes.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "cfgloop.h"
#include "target.h"
#include "tree-pass.h"
#include "df.h"
#include "dbgcnt.h"
#include "shrink-wrap.h"
#include "ifcvt.h"

#ifndef HAVE_conditional_move
#define HAVE_conditional_move 0
#endif
#ifndef HAVE_incscc
#define HAVE_incscc 0
#endif
#ifndef HAVE_decscc
#define HAVE_decscc 0
#endif
#ifndef HAVE_trap
#define HAVE_trap 0
#endif

#ifndef MAX_CONDITIONAL_EXECUTE
#define MAX_CONDITIONAL_EXECUTE \
  (BRANCH_COST (optimize_function_for_speed_p (cfun), false) \
   + 1)
#endif
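
/* Illustrative note (an editor's gloss, not from the original sources):
   with the default definition above, a target whose BRANCH_COST is 3
   when optimizing for speed allows at most 3 + 1 = 4 insns to be
   conditionally executed in place of a single branch.  */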

#define IFCVT_MULTIPLE_DUMPS 1

#define NULL_BLOCK  ((basic_block) NULL)

/* True if after combine pass.  */
static bool ifcvt_after_combine;

/* # of IF-THEN or IF-THEN-ELSE blocks we looked at */
static int num_possible_if_blocks;

/* # of IF-THEN or IF-THEN-ELSE blocks that were converted to conditional
   execution.  */
static int num_updated_if_blocks;

/* # of changes made.  */
static int num_true_changes;

/* Whether conditional execution changes were made.  */
static int cond_exec_changed_p;

/* Forward references.  */
static int count_bb_insns (const_basic_block);
static bool cheap_bb_rtx_cost_p (const_basic_block, int, int);
static rtx_insn *first_active_insn (basic_block);
static rtx_insn *last_active_insn (basic_block, int);
static rtx_insn *find_active_insn_before (basic_block, rtx_insn *);
static rtx_insn *find_active_insn_after (basic_block, rtx_insn *);
static basic_block block_fallthru (basic_block);
static int cond_exec_process_insns (ce_if_block *, rtx_insn *, rtx, rtx, int,
                                    int);
static rtx cond_exec_get_condition (rtx_insn *);
static rtx noce_get_condition (rtx_insn *, rtx_insn **, bool);
static int noce_operand_ok (const_rtx);
static void merge_if_block (ce_if_block *);
static int find_cond_trap (basic_block, edge, edge);
static basic_block find_if_header (basic_block, int);
static int block_jumps_and_fallthru_p (basic_block, basic_block);
static int noce_find_if_block (basic_block, edge, edge, int);
static int cond_exec_find_if_block (ce_if_block *);
static int find_if_case_1 (basic_block, edge, edge);
static int find_if_case_2 (basic_block, edge, edge);
static int dead_or_predicable (basic_block, basic_block, basic_block,
                               edge, int);
static void noce_emit_move_insn (rtx, rtx);
static rtx_insn *block_has_only_trap (basic_block);
\f
/* Count the number of non-jump active insns in BB.  */

static int
count_bb_insns (const_basic_block bb)
{
  int count = 0;
  rtx_insn *insn = BB_HEAD (bb);

  while (1)
    {
      if (active_insn_p (insn) && !JUMP_P (insn))
        count++;

      if (insn == BB_END (bb))
        break;
      insn = NEXT_INSN (insn);
    }

  return count;
}

/* Determine whether the total insn_rtx_cost on non-jump insns in
   basic block BB is less than MAX_COST.  This function returns
   false if the cost of any instruction could not be estimated.

   The cost of the non-jump insns in BB is scaled by REG_BR_PROB_BASE
   as those insns are being speculated.  MAX_COST is scaled with SCALE
   plus a small fudge factor.  */
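
/* Illustrative example (numbers chosen for exposition, not from the
   original sources): with scale = REG_BR_PROB_BASE / 2 (a 50% branch
   probability) and max_cost = COSTS_N_INSNS (2), the loop below in
   effect tests

     sum (insn_rtx_cost) * REG_BR_PROB_BASE >= COSTS_N_INSNS (2) * scale,

   so, ignoring the small fudge factor added below, the block counts as
   cheap while its summed cost stays under roughly one insn's worth,
   COSTS_N_INSNS (1).  */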

static bool
cheap_bb_rtx_cost_p (const_basic_block bb, int scale, int max_cost)
{
  int count = 0;
  rtx_insn *insn = BB_HEAD (bb);
  bool speed = optimize_bb_for_speed_p (bb);

  /* Set scale to REG_BR_PROB_BASE to avoid the identical scaling
     applied to insn_rtx_cost when optimizing for size.  Only do
     this after combine because if-conversion might interfere with
     passes before combine.

     Use optimize_function_for_speed_p instead of the pre-defined
     variable speed to make sure it is set to the same value for all
     basic blocks in one if-conversion transformation.  */
  if (!optimize_function_for_speed_p (cfun) && ifcvt_after_combine)
    scale = REG_BR_PROB_BASE;
  /* Our branch probability/scaling factors are just estimates and don't
     account for cases where we can get speculation for free and other
     secondary benefits.  So we fudge the scale factor to make speculating
     appear a little more profitable when optimizing for performance.  */
  else
    scale += REG_BR_PROB_BASE / 8;

  max_cost *= scale;

  while (1)
    {
      if (NONJUMP_INSN_P (insn))
        {
          int cost = insn_rtx_cost (PATTERN (insn), speed) * REG_BR_PROB_BASE;
          if (cost == 0)
            return false;

          /* If this instruction is the load or set of a "stack" register,
             such as a floating point register on x87, then the cost of
             speculatively executing this insn may need to include
             the additional cost of popping its result off of the
             register stack.  Unfortunately, correctly recognizing and
             accounting for this additional overhead is tricky, so for
             now we simply prohibit such speculative execution.  */
#ifdef STACK_REGS
          {
            rtx set = single_set (insn);
            if (set && STACK_REG_P (SET_DEST (set)))
              return false;
          }
#endif

          count += cost;
          if (count >= max_cost)
            return false;
        }
      else if (CALL_P (insn))
        return false;

      if (insn == BB_END (bb))
        break;
      insn = NEXT_INSN (insn);
    }

  return true;
}

/* Return the first non-jump active insn in the basic block.  */

static rtx_insn *
first_active_insn (basic_block bb)
{
  rtx_insn *insn = BB_HEAD (bb);

  if (LABEL_P (insn))
    {
      if (insn == BB_END (bb))
        return NULL;
      insn = NEXT_INSN (insn);
    }

  while (NOTE_P (insn) || DEBUG_INSN_P (insn))
    {
      if (insn == BB_END (bb))
        return NULL;
      insn = NEXT_INSN (insn);
    }

  if (JUMP_P (insn))
    return NULL;

  return insn;
}

/* Return the last active non-jump insn in the basic block.  */

static rtx_insn *
last_active_insn (basic_block bb, int skip_use_p)
{
  rtx_insn *insn = BB_END (bb);
  rtx_insn *head = BB_HEAD (bb);

  while (NOTE_P (insn)
         || JUMP_P (insn)
         || DEBUG_INSN_P (insn)
         || (skip_use_p
             && NONJUMP_INSN_P (insn)
             && GET_CODE (PATTERN (insn)) == USE))
    {
      if (insn == head)
        return NULL;
      insn = PREV_INSN (insn);
    }

  if (LABEL_P (insn))
    return NULL;

  return insn;
}

/* Return the active insn before INSN inside basic block CURR_BB.  */

static rtx_insn *
find_active_insn_before (basic_block curr_bb, rtx_insn *insn)
{
  if (!insn || insn == BB_HEAD (curr_bb))
    return NULL;

  while ((insn = PREV_INSN (insn)) != NULL_RTX)
    {
      if (NONJUMP_INSN_P (insn) || JUMP_P (insn) || CALL_P (insn))
        break;

      /* No other active insn all the way to the start of the basic block.  */
      if (insn == BB_HEAD (curr_bb))
        return NULL;
    }

  return insn;
}

/* Return the active insn after INSN inside basic block CURR_BB.  */

static rtx_insn *
find_active_insn_after (basic_block curr_bb, rtx_insn *insn)
{
  if (!insn || insn == BB_END (curr_bb))
    return NULL;

  while ((insn = NEXT_INSN (insn)) != NULL_RTX)
    {
      if (NONJUMP_INSN_P (insn) || JUMP_P (insn) || CALL_P (insn))
        break;

      /* No other active insn all the way to the end of the basic block.  */
      if (insn == BB_END (curr_bb))
        return NULL;
    }

  return insn;
}

/* Return the basic block reached by falling through the basic block BB.  */

static basic_block
block_fallthru (basic_block bb)
{
  edge e = find_fallthru_edge (bb->succs);

  return (e) ? e->dest : NULL_BLOCK;
}

/* Return true if RTXs A and B can be safely interchanged.  */

static bool
rtx_interchangeable_p (const_rtx a, const_rtx b)
{
  if (!rtx_equal_p (a, b))
    return false;

  if (GET_CODE (a) != MEM)
    return true;

  /* A dead type-unsafe memory reference is legal, but a live type-unsafe
     memory reference is not.  Interchanging a dead type-unsafe memory
     reference with a live type-safe one creates a live type-unsafe memory
     reference, in other words, it makes the program illegal.
     We check here conservatively whether the two memory references have
     equal memory attributes.  */

  return mem_attrs_eq_p (get_mem_attrs (a), get_mem_attrs (b));
}
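
/* An illustrative consequence of the check above (an editor's reading,
   not a comment from the original sources): two MEMs can be rtx_equal_p
   yet carry different mem_attrs, e.g. different alias sets because they
   were derived from differently-typed objects; mem_attrs_eq_p rejects
   that pair, which is what keeps the interchange conservative.  */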

\f
/* Go through a bunch of insns, converting them to conditional
   execution format if possible.  Return TRUE if all of the non-note
   insns were processed.  */

static int
cond_exec_process_insns (ce_if_block *ce_info ATTRIBUTE_UNUSED,
                         /* if block information */rtx_insn *start,
                         /* first insn to look at */rtx end,
                         /* last insn to look at */rtx test,
                         /* conditional execution test */int prob_val,
                         /* probability of branch taken. */int mod_ok)
{
  int must_be_last = FALSE;
  rtx_insn *insn;
  rtx xtest;
  rtx pattern;

  if (!start || !end)
    return FALSE;

  for (insn = start; ; insn = NEXT_INSN (insn))
    {
      /* dwarf2out can't cope with conditional prologues.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END)
        return FALSE;

      if (NOTE_P (insn) || DEBUG_INSN_P (insn))
        goto insn_done;

      gcc_assert (NONJUMP_INSN_P (insn) || CALL_P (insn));

      /* dwarf2out can't cope with conditional unwind info.  */
      if (RTX_FRAME_RELATED_P (insn))
        return FALSE;

      /* Remove USE insns that get in the way.  */
      if (reload_completed && GET_CODE (PATTERN (insn)) == USE)
        {
          /* ??? Ug.  Actually unlinking the thing is problematic,
             given what we'd have to coordinate with our callers.  */
          SET_INSN_DELETED (insn);
          goto insn_done;
        }

      /* Last insn wasn't last?  */
      if (must_be_last)
        return FALSE;

      if (modified_in_p (test, insn))
        {
          if (!mod_ok)
            return FALSE;
          must_be_last = TRUE;
        }

      /* Now build the conditional form of the instruction.  */
      pattern = PATTERN (insn);
      xtest = copy_rtx (test);

      /* If this is already a COND_EXEC, rewrite the test to be an AND of the
         two conditions.  */
      if (GET_CODE (pattern) == COND_EXEC)
        {
          if (GET_MODE (xtest) != GET_MODE (COND_EXEC_TEST (pattern)))
            return FALSE;

          xtest = gen_rtx_AND (GET_MODE (xtest), xtest,
                               COND_EXEC_TEST (pattern));
          pattern = COND_EXEC_CODE (pattern);
        }

      pattern = gen_rtx_COND_EXEC (VOIDmode, xtest, pattern);

      /* If the machine needs to modify the insn being conditionally executed,
         say for example to force a constant integer operand into a temp
         register, do so here.  */
#ifdef IFCVT_MODIFY_INSN
      IFCVT_MODIFY_INSN (ce_info, pattern, insn);
      if (! pattern)
        return FALSE;
#endif

      validate_change (insn, &PATTERN (insn), pattern, 1);

      if (CALL_P (insn) && prob_val >= 0)
        validate_change (insn, &REG_NOTES (insn),
                         gen_rtx_INT_LIST ((machine_mode) REG_BR_PROB,
                                           prob_val, REG_NOTES (insn)), 1);

    insn_done:
      if (insn == end)
        break;
    }

  return TRUE;
}
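
/* Illustrative sketch of what the loop above produces (simplified RTL,
   register numbers invented for exposition): an insn

     (set (reg:SI 100) (reg:SI 101))

   predicated on TEST = (ne (reg:CC cc) (const_int 0)) is rewritten as

     (cond_exec (ne (reg:CC cc) (const_int 0))
                (set (reg:SI 100) (reg:SI 101)))

   and an insn that is already a COND_EXEC gets the AND of both tests.  */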

/* Return the condition for a jump.  Do not do any special processing.  */

static rtx
cond_exec_get_condition (rtx_insn *jump)
{
  rtx test_if, cond;

  if (any_condjump_p (jump))
    test_if = SET_SRC (pc_set (jump));
  else
    return NULL_RTX;
  cond = XEXP (test_if, 0);

  /* If this branches to JUMP_LABEL when the condition is false,
     reverse the condition.  */
  if (GET_CODE (XEXP (test_if, 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (test_if, 2)) == JUMP_LABEL (jump))
    {
      enum rtx_code rev = reversed_comparison_code (cond, jump);
      if (rev == UNKNOWN)
        return NULL_RTX;

      cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0),
                             XEXP (cond, 1));
    }

  return cond;
}

/* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it
   to conditional execution.  Return TRUE if we were successful at
   converting the block.  */

static int
cond_exec_process_if_block (ce_if_block * ce_info,
                            /* if block information */int do_multiple_p)
{
  basic_block test_bb = ce_info->test_bb;  /* last test block */
  basic_block then_bb = ce_info->then_bb;  /* THEN */
  basic_block else_bb = ce_info->else_bb;  /* ELSE or NULL */
  rtx test_expr;                /* expression in IF_THEN_ELSE that is tested */
  rtx_insn *then_start;         /* first insn in THEN block */
  rtx_insn *then_end;           /* last insn + 1 in THEN block */
  rtx_insn *else_start = NULL;  /* first insn in ELSE block or NULL */
  rtx_insn *else_end = NULL;    /* last insn + 1 in ELSE block */
  int max;                      /* max # of insns to convert.  */
  int then_mod_ok;              /* whether conditional mods are ok in THEN */
  rtx true_expr;                /* test for else block insns */
  rtx false_expr;               /* test for then block insns */
  int true_prob_val;            /* probability of else block */
  int false_prob_val;           /* probability of then block */
  rtx_insn *then_last_head = NULL;   /* Last match at the head of THEN */
  rtx_insn *else_last_head = NULL;   /* Last match at the head of ELSE */
  rtx_insn *then_first_tail = NULL;  /* First match at the tail of THEN */
  rtx_insn *else_first_tail = NULL;  /* First match at the tail of ELSE */
  int then_n_insns, else_n_insns, n_insns;
  enum rtx_code false_code;
  rtx note;

  /* If test is comprised of && or || elements, and we've failed at handling
     all of them together, just use the last test if it is the special case of
     && elements without an ELSE block.  */
  if (!do_multiple_p && ce_info->num_multiple_test_blocks)
    {
      if (else_bb || ! ce_info->and_and_p)
        return FALSE;

      ce_info->test_bb = test_bb = ce_info->last_test_bb;
      ce_info->num_multiple_test_blocks = 0;
      ce_info->num_and_and_blocks = 0;
      ce_info->num_or_or_blocks = 0;
    }

  /* Find the conditional jump to the ELSE or JOIN part, and isolate
     the test.  */
  test_expr = cond_exec_get_condition (BB_END (test_bb));
  if (! test_expr)
    return FALSE;

  /* If the conditional jump is more than just a conditional jump,
     then we cannot do conditional execution conversion on this block.  */
  if (! onlyjump_p (BB_END (test_bb)))
    return FALSE;

  /* Collect the bounds of where we're to search, skipping any labels, jumps
     and notes at the beginning and end of the block.  Then count the total
     number of insns and see if it is small enough to convert.  */
  then_start = first_active_insn (then_bb);
  then_end = last_active_insn (then_bb, TRUE);
  then_n_insns = ce_info->num_then_insns = count_bb_insns (then_bb);
  n_insns = then_n_insns;
  max = MAX_CONDITIONAL_EXECUTE;

  if (else_bb)
    {
      int n_matching;

      max *= 2;
      else_start = first_active_insn (else_bb);
      else_end = last_active_insn (else_bb, TRUE);
      else_n_insns = ce_info->num_else_insns = count_bb_insns (else_bb);
      n_insns += else_n_insns;

      /* Look for matching sequences at the head and tail of the two blocks,
         and limit the range of insns to be converted if possible.  */
      n_matching = flow_find_cross_jump (then_bb, else_bb,
                                         &then_first_tail, &else_first_tail,
                                         NULL);
      if (then_first_tail == BB_HEAD (then_bb))
        then_start = then_end = NULL;
      if (else_first_tail == BB_HEAD (else_bb))
        else_start = else_end = NULL;

      if (n_matching > 0)
        {
          if (then_end)
            then_end = find_active_insn_before (then_bb, then_first_tail);
          if (else_end)
            else_end = find_active_insn_before (else_bb, else_first_tail);
          n_insns -= 2 * n_matching;
        }

      if (then_start
          && else_start
          && then_n_insns > n_matching
          && else_n_insns > n_matching)
        {
          int longest_match = MIN (then_n_insns - n_matching,
                                   else_n_insns - n_matching);
          n_matching
            = flow_find_head_matching_sequence (then_bb, else_bb,
                                                &then_last_head,
                                                &else_last_head,
                                                longest_match);

          if (n_matching > 0)
            {
              rtx_insn *insn;

              /* We won't pass the insns in the head sequence to
                 cond_exec_process_insns, so we need to test them here
                 to make sure that they don't clobber the condition.  */
              for (insn = BB_HEAD (then_bb);
                   insn != NEXT_INSN (then_last_head);
                   insn = NEXT_INSN (insn))
                if (!LABEL_P (insn) && !NOTE_P (insn)
                    && !DEBUG_INSN_P (insn)
                    && modified_in_p (test_expr, insn))
                  return FALSE;
            }

          if (then_last_head == then_end)
            then_start = then_end = NULL;
          if (else_last_head == else_end)
            else_start = else_end = NULL;

          if (n_matching > 0)
            {
              if (then_start)
                then_start = find_active_insn_after (then_bb, then_last_head);
              if (else_start)
                else_start = find_active_insn_after (else_bb, else_last_head);
              n_insns -= 2 * n_matching;
            }
        }
    }

  if (n_insns > max)
    return FALSE;

  /* Map test_expr/test_jump into the appropriate MD tests to use on
     the conditionally executed code.  */

  true_expr = test_expr;

  false_code = reversed_comparison_code (true_expr, BB_END (test_bb));
  if (false_code != UNKNOWN)
    false_expr = gen_rtx_fmt_ee (false_code, GET_MODE (true_expr),
                                 XEXP (true_expr, 0), XEXP (true_expr, 1));
  else
    false_expr = NULL_RTX;

#ifdef IFCVT_MODIFY_TESTS
  /* If the machine description needs to modify the tests, such as setting a
     conditional execution register from a comparison, it can do so here.  */
  IFCVT_MODIFY_TESTS (ce_info, true_expr, false_expr);

  /* See if the conversion failed.  */
  if (!true_expr || !false_expr)
    goto fail;
#endif

  note = find_reg_note (BB_END (test_bb), REG_BR_PROB, NULL_RTX);
  if (note)
    {
      true_prob_val = XINT (note, 0);
      false_prob_val = REG_BR_PROB_BASE - true_prob_val;
    }
  else
    {
      true_prob_val = -1;
      false_prob_val = -1;
    }

  /* If we have && or || tests, do them here.  These tests are in the adjacent
     blocks after the first block containing the test.  */
  if (ce_info->num_multiple_test_blocks > 0)
    {
      basic_block bb = test_bb;
      basic_block last_test_bb = ce_info->last_test_bb;

      if (! false_expr)
        goto fail;

      do
        {
          rtx_insn *start, *end;
          rtx t, f;
          enum rtx_code f_code;

          bb = block_fallthru (bb);
          start = first_active_insn (bb);
          end = last_active_insn (bb, TRUE);
          if (start
              && ! cond_exec_process_insns (ce_info, start, end, false_expr,
                                            false_prob_val, FALSE))
            goto fail;

          /* If the conditional jump is more than just a conditional jump, then
             we cannot do conditional execution conversion on this block.  */
          if (! onlyjump_p (BB_END (bb)))
            goto fail;

          /* Find the conditional jump and isolate the test.  */
          t = cond_exec_get_condition (BB_END (bb));
          if (! t)
            goto fail;

          f_code = reversed_comparison_code (t, BB_END (bb));
          if (f_code == UNKNOWN)
            goto fail;

          f = gen_rtx_fmt_ee (f_code, GET_MODE (t), XEXP (t, 0), XEXP (t, 1));
          if (ce_info->and_and_p)
            {
              t = gen_rtx_AND (GET_MODE (t), true_expr, t);
              f = gen_rtx_IOR (GET_MODE (t), false_expr, f);
            }
          else
            {
              t = gen_rtx_IOR (GET_MODE (t), true_expr, t);
              f = gen_rtx_AND (GET_MODE (t), false_expr, f);
            }

          /* If the machine description needs to modify the tests, such as
             setting a conditional execution register from a comparison, it can
             do so here.  */
#ifdef IFCVT_MODIFY_MULTIPLE_TESTS
          IFCVT_MODIFY_MULTIPLE_TESTS (ce_info, bb, t, f);

          /* See if the conversion failed.  */
          if (!t || !f)
            goto fail;
#endif

          true_expr = t;
          false_expr = f;
        }
      while (bb != last_test_bb);
    }

  /* For IF-THEN-ELSE blocks, we don't allow modifications of the test
     in the THEN block.  */
  then_mod_ok = (else_bb == NULL_BLOCK);

  /* Go through the THEN and ELSE blocks converting the insns if possible
     to conditional execution.  */

  if (then_end
      && (! false_expr
          || ! cond_exec_process_insns (ce_info, then_start, then_end,
                                        false_expr, false_prob_val,
                                        then_mod_ok)))
    goto fail;

  if (else_bb && else_end
      && ! cond_exec_process_insns (ce_info, else_start, else_end,
                                    true_expr, true_prob_val, TRUE))
    goto fail;

  /* If we cannot apply the changes, fail.  Do not go through the normal fail
     processing, since apply_change_group will call cancel_changes.  */
  if (! apply_change_group ())
    {
#ifdef IFCVT_MODIFY_CANCEL
      /* Cancel any machine dependent changes.  */
      IFCVT_MODIFY_CANCEL (ce_info);
#endif
      return FALSE;
    }

#ifdef IFCVT_MODIFY_FINAL
  /* Do any machine dependent final modifications.  */
  IFCVT_MODIFY_FINAL (ce_info);
#endif

  /* Conversion succeeded.  */
  if (dump_file)
    fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
             n_insns, (n_insns == 1) ? " was" : "s were");

  /* Merge the blocks!  If we had matching sequences, make sure to delete one
     copy at the appropriate location first: delete the copy in the THEN branch
     for a tail sequence so that the remaining one is executed last for both
     branches, and delete the copy in the ELSE branch for a head sequence so
     that the remaining one is executed first for both branches.  */
  if (then_first_tail)
    {
      rtx_insn *from = then_first_tail;
      if (!INSN_P (from))
        from = find_active_insn_after (then_bb, from);
      delete_insn_chain (from, BB_END (then_bb), false);
    }
  if (else_last_head)
    delete_insn_chain (first_active_insn (else_bb), else_last_head, false);

  merge_if_block (ce_info);
  cond_exec_changed_p = TRUE;
  return TRUE;

 fail:
#ifdef IFCVT_MODIFY_CANCEL
  /* Cancel any machine dependent changes.  */
  IFCVT_MODIFY_CANCEL (ce_info);
#endif

  cancel_changes (0);
  return FALSE;
}
\f
/* Used by noce_process_if_block to communicate with its subroutines.

   The subroutines know that A and B may be evaluated freely.  They
   know that X is a register.  They should insert new instructions
   before cond_earliest.  */

struct noce_if_info
{
  /* The basic blocks that make up the IF-THEN-{ELSE-,}JOIN block.  */
  basic_block test_bb, then_bb, else_bb, join_bb;

  /* The jump that ends TEST_BB.  */
  rtx_insn *jump;

  /* The jump condition.  */
  rtx cond;

  /* New insns should be inserted before this one.  */
  rtx_insn *cond_earliest;

  /* Insns in the THEN and ELSE block.  There is always just one insn
     in each of those blocks, and it is a single_set insn.
     If there was no ELSE block, INSN_B is the last insn before
     COND_EARLIEST, or NULL_RTX.  In the former case, the insn
     operands are still valid, as if INSN_B was moved down below
     the jump.  */
  rtx_insn *insn_a, *insn_b;

  /* The SET_SRC of INSN_A and INSN_B.  */
  rtx a, b;

  /* The SET_DEST of INSN_A.  */
  rtx x;

  /* True if this if block is not canonical.  In the canonical form of
     if blocks, the THEN_BB is the block reached via the fallthru edge
     from TEST_BB.  For the noce transformations, we allow the symmetric
     form as well.  */
  bool then_else_reversed;

  /* Estimated cost of the particular branch instruction.  */
  int branch_cost;
};
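
/* A minimal sketch of the shape described by the fields above, assuming
   the canonical (non-reversed) form:

     test_bb:  ...
               if (COND) goto else_bb;   <- JUMP
     then_bb:  x = a;                    <- INSN_A
               goto join_bb;
     else_bb:  x = b;                    <- INSN_B
     join_bb:  ...

   The noce ("no conditional execution") transformations below try to
   replace this diamond with straight-line code that computes X.  */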

static rtx noce_emit_store_flag (struct noce_if_info *, rtx, int, int);
static int noce_try_move (struct noce_if_info *);
static int noce_try_store_flag (struct noce_if_info *);
static int noce_try_addcc (struct noce_if_info *);
static int noce_try_store_flag_constants (struct noce_if_info *);
static int noce_try_store_flag_mask (struct noce_if_info *);
static rtx noce_emit_cmove (struct noce_if_info *, rtx, enum rtx_code, rtx,
                            rtx, rtx, rtx);
static int noce_try_cmove (struct noce_if_info *);
static int noce_try_cmove_arith (struct noce_if_info *);
static rtx noce_get_alt_condition (struct noce_if_info *, rtx, rtx_insn **);
static int noce_try_minmax (struct noce_if_info *);
static int noce_try_abs (struct noce_if_info *);
static int noce_try_sign_mask (struct noce_if_info *);

/* Helper function for noce_try_store_flag*.  */

static rtx
noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep,
                      int normalize)
{
  rtx cond = if_info->cond;
  int cond_complex;
  enum rtx_code code;

  cond_complex = (! general_operand (XEXP (cond, 0), VOIDmode)
                  || ! general_operand (XEXP (cond, 1), VOIDmode));

  /* If earliest == jump, or when the condition is complex, try to
     build the store_flag insn directly.  */

  if (cond_complex)
    {
      rtx set = pc_set (if_info->jump);
      cond = XEXP (SET_SRC (set), 0);
      if (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
          && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (if_info->jump))
        reversep = !reversep;
      if (if_info->then_else_reversed)
        reversep = !reversep;
    }

  if (reversep)
    code = reversed_comparison_code (cond, if_info->jump);
  else
    code = GET_CODE (cond);

  if ((if_info->cond_earliest == if_info->jump || cond_complex)
      && (normalize == 0 || STORE_FLAG_VALUE == normalize))
    {
      rtx src = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
                                XEXP (cond, 1));
      rtx set = gen_rtx_SET (VOIDmode, x, src);

      start_sequence ();
      rtx_insn *insn = emit_insn (set);

      if (recog_memoized (insn) >= 0)
        {
          rtx_insn *seq = get_insns ();
          end_sequence ();
          emit_insn (seq);

          if_info->cond_earliest = if_info->jump;

          return x;
        }

      end_sequence ();
    }

  /* Don't even try if the comparison operands or the mode of X are weird.  */
  if (cond_complex || !SCALAR_INT_MODE_P (GET_MODE (x)))
    return NULL_RTX;

  return emit_store_flag (x, code, XEXP (cond, 0),
                          XEXP (cond, 1), VOIDmode,
                          (code == LTU || code == LEU
                           || code == GEU || code == GTU), normalize);
}

/* Emit instruction to move an rtx, possibly into STRICT_LOW_PART.
   X is the destination/target and Y is the value to copy.  */

static void
noce_emit_move_insn (rtx x, rtx y)
{
  machine_mode outmode;
  rtx outer, inner;
  int bitpos;

  if (GET_CODE (x) != STRICT_LOW_PART)
    {
      rtx_insn *seq, *insn;
      rtx target;
      optab ot;

      start_sequence ();
      /* Check that the SET_SRC is reasonable before calling emit_move_insn,
         otherwise construct a suitable SET pattern ourselves.  */
      insn = (OBJECT_P (y) || CONSTANT_P (y) || GET_CODE (y) == SUBREG)
             ? emit_move_insn (x, y)
             : emit_insn (gen_rtx_SET (VOIDmode, x, y));
      seq = get_insns ();
      end_sequence ();

      if (recog_memoized (insn) <= 0)
        {
          if (GET_CODE (x) == ZERO_EXTRACT)
            {
              rtx op = XEXP (x, 0);
              unsigned HOST_WIDE_INT size = INTVAL (XEXP (x, 1));
              unsigned HOST_WIDE_INT start = INTVAL (XEXP (x, 2));

              /* store_bit_field expects START to be relative to
                 BYTES_BIG_ENDIAN and adjusts this value for machines with
                 BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN.  In order to be able to
                 invoke store_bit_field again it is necessary to have the START
                 value from the first call.  */
              if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
                {
                  if (MEM_P (op))
                    start = BITS_PER_UNIT - start - size;
                  else
                    {
                      gcc_assert (REG_P (op));
                      start = BITS_PER_WORD - start - size;
                    }
                }

              gcc_assert (start < (MEM_P (op) ? BITS_PER_UNIT : BITS_PER_WORD));
              store_bit_field (op, size, start, 0, 0, GET_MODE (x), y);
              return;
            }

          switch (GET_RTX_CLASS (GET_CODE (y)))
            {
            case RTX_UNARY:
              ot = code_to_optab (GET_CODE (y));
              if (ot)
                {
                  start_sequence ();
                  target = expand_unop (GET_MODE (y), ot, XEXP (y, 0), x, 0);
                  if (target != NULL_RTX)
                    {
                      if (target != x)
                        emit_move_insn (x, target);
                      seq = get_insns ();
                    }
                  end_sequence ();
                }
              break;

            case RTX_BIN_ARITH:
            case RTX_COMM_ARITH:
              ot = code_to_optab (GET_CODE (y));
              if (ot)
                {
                  start_sequence ();
                  target = expand_binop (GET_MODE (y), ot,
                                         XEXP (y, 0), XEXP (y, 1),
                                         x, 0, OPTAB_DIRECT);
                  if (target != NULL_RTX)
                    {
                      if (target != x)
                        emit_move_insn (x, target);
                      seq = get_insns ();
                    }
                  end_sequence ();
                }
              break;

            default:
              break;
            }
        }

      emit_insn (seq);
      return;
    }

  outer = XEXP (x, 0);
  inner = XEXP (outer, 0);
  outmode = GET_MODE (outer);
  bitpos = SUBREG_BYTE (outer) * BITS_PER_UNIT;
  store_bit_field (inner, GET_MODE_BITSIZE (outmode), bitpos,
                   0, 0, outmode, y);
}

/* Return the sequence of instructions generated by if conversion.  This
   function calls end_sequence () to end the current stream, and ensures
   that the instructions are unshared and are recognizable non-jump
   insns.  On failure, this function returns NULL_RTX.  */

static rtx_insn *
end_ifcvt_sequence (struct noce_if_info *if_info)
{
  rtx_insn *insn;
  rtx_insn *seq = get_insns ();

  set_used_flags (if_info->x);
  set_used_flags (if_info->cond);
  set_used_flags (if_info->a);
  set_used_flags (if_info->b);
  unshare_all_rtl_in_chain (seq);
  end_sequence ();

  /* Make sure that all of the instructions emitted are recognizable,
     and that we haven't introduced a new jump instruction.
     As an exercise for the reader, build a general mechanism that
     allows proper placement of required clobbers.  */
  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (JUMP_P (insn)
        || recog_memoized (insn) == -1)
      return NULL;

  return seq;
}

/* Convert "if (a != b) x = a; else x = b" into "x = a" and
   "if (a == b) x = a; else x = b" into "x = b".  */

static int
noce_try_move (struct noce_if_info *if_info)
{
  rtx cond = if_info->cond;
  enum rtx_code code = GET_CODE (cond);
  rtx y;
  rtx_insn *seq;

  if (code != NE && code != EQ)
    return FALSE;

  /* This optimization isn't valid if either A or B could be a NaN
     or a signed zero.  */
  if (HONOR_NANS (GET_MODE (if_info->x))
      || HONOR_SIGNED_ZEROS (GET_MODE (if_info->x)))
    return FALSE;

  /* Check whether the operands of the comparison are A and B,
     in either order.  */
  if ((rtx_equal_p (if_info->a, XEXP (cond, 0))
       && rtx_equal_p (if_info->b, XEXP (cond, 1)))
      || (rtx_equal_p (if_info->a, XEXP (cond, 1))
          && rtx_equal_p (if_info->b, XEXP (cond, 0))))
    {
      if (!rtx_interchangeable_p (if_info->a, if_info->b))
        return FALSE;

      y = (code == EQ) ? if_info->a : if_info->b;

      /* Avoid generating the move if the source is the destination.  */
      if (! rtx_equal_p (if_info->x, y))
        {
          start_sequence ();
          noce_emit_move_insn (if_info->x, y);
          seq = end_ifcvt_sequence (if_info);
          if (!seq)
            return FALSE;

          emit_insn_before_setloc (seq, if_info->jump,
                                   INSN_LOCATION (if_info->insn_a));
        }
      return TRUE;
    }
  return FALSE;
}
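
/* Why the transformation above is sound (an editor's worked reading of
   the code, not an original comment): for
   "if (a != b) x = a; else x = b;", when a != b we store a, and when
   a == b storing a and storing b are indistinguishable, so "x = a" is
   always correct once NaNs, signed zeros, and non-interchangeable
   memory references have been excluded.  */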

/* Convert "if (test) x = 1; else x = 0".

   Only try 0 and STORE_FLAG_VALUE here.  Other combinations will be
   tried in noce_try_store_flag_constants after noce_try_cmove has had
   a go at the conversion.  */

static int
noce_try_store_flag (struct noce_if_info *if_info)
{
  int reversep;
  rtx target;
  rtx_insn *seq;

  if (CONST_INT_P (if_info->b)
      && INTVAL (if_info->b) == STORE_FLAG_VALUE
      && if_info->a == const0_rtx)
    reversep = 0;
  else if (if_info->b == const0_rtx
           && CONST_INT_P (if_info->a)
           && INTVAL (if_info->a) == STORE_FLAG_VALUE
           && (reversed_comparison_code (if_info->cond, if_info->jump)
               != UNKNOWN))
    reversep = 1;
  else
    return FALSE;

  start_sequence ();

  target = noce_emit_store_flag (if_info, if_info->x, reversep, 0);
  if (target)
    {
      if (target != if_info->x)
        noce_emit_move_insn (if_info->x, target);

      seq = end_ifcvt_sequence (if_info);
      if (! seq)
        return FALSE;

      emit_insn_before_setloc (seq, if_info->jump,
                               INSN_LOCATION (if_info->insn_a));
      return TRUE;
    }
  else
    {
      end_sequence ();
      return FALSE;
    }
}

/* Convert "if (test) x = a; else x = b", for A and B constant.  */

static int
noce_try_store_flag_constants (struct noce_if_info *if_info)
{
  rtx target;
  rtx_insn *seq;
  int reversep;
  HOST_WIDE_INT itrue, ifalse, diff, tmp;
  int normalize, can_reverse;
  machine_mode mode;

  if (CONST_INT_P (if_info->a)
      && CONST_INT_P (if_info->b))
    {
      mode = GET_MODE (if_info->x);
      ifalse = INTVAL (if_info->a);
      itrue = INTVAL (if_info->b);

      diff = (unsigned HOST_WIDE_INT) itrue - ifalse;
      /* Make sure we can represent the difference between the two values.  */
      if ((diff > 0)
          != ((ifalse < 0) != (itrue < 0) ? ifalse < 0 : ifalse < itrue))
        return FALSE;

      diff = trunc_int_for_mode (diff, mode);

      can_reverse = (reversed_comparison_code (if_info->cond, if_info->jump)
                     != UNKNOWN);

      reversep = 0;
      if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
        normalize = 0;
      else if (ifalse == 0 && exact_log2 (itrue) >= 0
               && (STORE_FLAG_VALUE == 1
                   || if_info->branch_cost >= 2))
        normalize = 1;
      else if (itrue == 0 && exact_log2 (ifalse) >= 0 && can_reverse
               && (STORE_FLAG_VALUE == 1 || if_info->branch_cost >= 2))
        normalize = 1, reversep = 1;
      else if (itrue == -1
               && (STORE_FLAG_VALUE == -1
                   || if_info->branch_cost >= 2))
        normalize = -1;
      else if (ifalse == -1 && can_reverse
               && (STORE_FLAG_VALUE == -1 || if_info->branch_cost >= 2))
        normalize = -1, reversep = 1;
      else if ((if_info->branch_cost >= 2 && STORE_FLAG_VALUE == -1)
               || if_info->branch_cost >= 3)
        normalize = -1;
      else
        return FALSE;

      if (reversep)
        {
          tmp = itrue; itrue = ifalse; ifalse = tmp;
          diff = trunc_int_for_mode (-(unsigned HOST_WIDE_INT) diff, mode);
        }

      start_sequence ();
      target = noce_emit_store_flag (if_info, if_info->x, reversep, normalize);
      if (! target)
        {
          end_sequence ();
          return FALSE;
        }

      /* if (test) x = 3; else x = 4;
         =>   x = 3 + (test == 0);  */
      if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
        {
          target = expand_simple_binop (mode,
                                        (diff == STORE_FLAG_VALUE
                                         ? PLUS : MINUS),
                                        gen_int_mode (ifalse, mode), target,
                                        if_info->x, 0, OPTAB_WIDEN);
        }

      /* if (test) x = 8; else x = 0;
         =>   x = (test != 0) << 3;  */
      else if (ifalse == 0 && (tmp = exact_log2 (itrue)) >= 0)
        {
          target = expand_simple_binop (mode, ASHIFT,
                                        target, GEN_INT (tmp), if_info->x, 0,
                                        OPTAB_WIDEN);
        }

      /* if (test) x = -1; else x = b;
         =>   x = -(test != 0) | b;  */
      else if (itrue == -1)
        {
          target = expand_simple_binop (mode, IOR,
                                        target, gen_int_mode (ifalse, mode),
                                        if_info->x, 0, OPTAB_WIDEN);
        }

      /* if (test) x = a; else x = b;
         =>   x = (-(test != 0) & (b - a)) + a;  */
      else
        {
          target = expand_simple_binop (mode, AND,
                                        target, gen_int_mode (diff, mode),
                                        if_info->x, 0, OPTAB_WIDEN);
          if (target)
            target = expand_simple_binop (mode, PLUS,
                                          target, gen_int_mode (ifalse, mode),
                                          if_info->x, 0, OPTAB_WIDEN);
        }

      if (! target)
        {
          end_sequence ();
          return FALSE;
        }

      if (target != if_info->x)
        noce_emit_move_insn (if_info->x, target);

      seq = end_ifcvt_sequence (if_info);
      if (!seq)
        return FALSE;

      emit_insn_before_setloc (seq, if_info->jump,
                               INSN_LOCATION (if_info->insn_a));
      return TRUE;
    }

  return FALSE;
}
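
/* Worked example for the PLUS/MINUS case above (illustrative, assuming
   STORE_FLAG_VALUE == 1): for "if (test) x = 3; else x = 4;" we get
   itrue = 3, ifalse = 4, diff = -1 == -STORE_FLAG_VALUE, so normalize
   is 0 and the expansion emits x = 4 - (test != 0), which yields 3
   when the condition holds and 4 otherwise.  */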

/* Convert "if (test) foo++" into "foo += (test != 0)", and
   similarly for "foo--".  */

static int
noce_try_addcc (struct noce_if_info *if_info)
{
  rtx target;
  rtx_insn *seq;
  int subtract, normalize;

  if (GET_CODE (if_info->a) == PLUS
      && rtx_equal_p (XEXP (if_info->a, 0), if_info->b)
      && (reversed_comparison_code (if_info->cond, if_info->jump)
          != UNKNOWN))
    {
      rtx cond = if_info->cond;
      enum rtx_code code = reversed_comparison_code (cond, if_info->jump);

      /* First try to use addcc pattern.  */
      if (general_operand (XEXP (cond, 0), VOIDmode)
          && general_operand (XEXP (cond, 1), VOIDmode))
        {
          start_sequence ();
          target = emit_conditional_add (if_info->x, code,
                                         XEXP (cond, 0),
                                         XEXP (cond, 1),
                                         VOIDmode,
                                         if_info->b,
                                         XEXP (if_info->a, 1),
                                         GET_MODE (if_info->x),
                                         (code == LTU || code == GEU
                                          || code == LEU || code == GTU));
          if (target)
            {
              if (target != if_info->x)
                noce_emit_move_insn (if_info->x, target);

              seq = end_ifcvt_sequence (if_info);
              if (!seq)
                return FALSE;

              emit_insn_before_setloc (seq, if_info->jump,
                                       INSN_LOCATION (if_info->insn_a));
              return TRUE;
            }
          end_sequence ();
        }

      /* If that fails, construct conditional increment or decrement using
         setcc.  */
      if (if_info->branch_cost >= 2
          && (XEXP (if_info->a, 1) == const1_rtx
              || XEXP (if_info->a, 1) == constm1_rtx))
        {
          start_sequence ();
          if (STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
            subtract = 0, normalize = 0;
          else if (-STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
            subtract = 1, normalize = 0;
          else
            subtract = 0, normalize = INTVAL (XEXP (if_info->a, 1));

          target = noce_emit_store_flag (if_info,
                                         gen_reg_rtx (GET_MODE (if_info->x)),
                                         1, normalize);

          if (target)
            target = expand_simple_binop (GET_MODE (if_info->x),
                                          subtract ? MINUS : PLUS,
                                          if_info->b, target, if_info->x,
                                          0, OPTAB_WIDEN);
          if (target)
            {
              if (target != if_info->x)
                noce_emit_move_insn (if_info->x, target);

              seq = end_ifcvt_sequence (if_info);
              if (!seq)
                return FALSE;

              emit_insn_before_setloc (seq, if_info->jump,
                                       INSN_LOCATION (if_info->insn_a));
              return TRUE;
            }
          end_sequence ();
        }
    }

  return FALSE;
}

/* Convert "if (test) x = 0;" to "x &= -(test == 0);"  */

static int
noce_try_store_flag_mask (struct noce_if_info *if_info)
{
  rtx target;
  rtx_insn *seq;
  int reversep;

  reversep = 0;
  if ((if_info->branch_cost >= 2
       || STORE_FLAG_VALUE == -1)
      && ((if_info->a == const0_rtx
           && rtx_equal_p (if_info->b, if_info->x))
          || ((reversep = (reversed_comparison_code (if_info->cond,
                                                     if_info->jump)
                           != UNKNOWN))
              && if_info->b == const0_rtx
              && rtx_equal_p (if_info->a, if_info->x))))
    {
      start_sequence ();
      target = noce_emit_store_flag (if_info,
                                     gen_reg_rtx (GET_MODE (if_info->x)),
                                     reversep, -1);
      if (target)
        target = expand_simple_binop (GET_MODE (if_info->x), AND,
                                      if_info->x,
                                      target, if_info->x, 0,
                                      OPTAB_WIDEN);

      if (target)
        {
          if (target != if_info->x)
            noce_emit_move_insn (if_info->x, target);

          seq = end_ifcvt_sequence (if_info);
          if (!seq)
            return FALSE;

          emit_insn_before_setloc (seq, if_info->jump,
                                   INSN_LOCATION (if_info->insn_a));
          return TRUE;
        }

      end_sequence ();
    }

  return FALSE;
}

/* Helper function for noce_try_cmove and noce_try_cmove_arith.  */

static rtx
noce_emit_cmove (struct noce_if_info *if_info, rtx x, enum rtx_code code,
                 rtx cmp_a, rtx cmp_b, rtx vfalse, rtx vtrue)
{
  rtx target ATTRIBUTE_UNUSED;
  int unsignedp ATTRIBUTE_UNUSED;

  /* If earliest == jump, try to build the cmove insn directly.
     This is helpful when combine has created some complex condition
     (like for alpha's cmovlbs) that we can't hope to regenerate
     through the normal interface.  */

  if (if_info->cond_earliest == if_info->jump)
    {
      rtx cond = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
      rtx if_then_else = gen_rtx_IF_THEN_ELSE (GET_MODE (x),
                                               cond, vtrue, vfalse);
      rtx set = gen_rtx_SET (VOIDmode, x, if_then_else);

      start_sequence ();
      rtx_insn *insn = emit_insn (set);

      if (recog_memoized (insn) >= 0)
        {
          rtx_insn *seq = get_insns ();
          end_sequence ();
          emit_insn (seq);

          return x;
        }

      end_sequence ();
    }

  /* Don't even try if the comparison operands are weird, unless the
     target supports cbranchcc4 and we have a CC-mode comparison
     against zero.  */
  if (! general_operand (cmp_a, GET_MODE (cmp_a))
      || ! general_operand (cmp_b, GET_MODE (cmp_b)))
    {
#if HAVE_cbranchcc4
      if (GET_MODE_CLASS (GET_MODE (cmp_a)) != MODE_CC
          || cmp_b != const0_rtx)
#endif
        return NULL_RTX;
    }

#if HAVE_conditional_move
  unsignedp = (code == LTU || code == GEU
               || code == LEU || code == GTU);

  target = emit_conditional_move (x, code, cmp_a, cmp_b, VOIDmode,
                                  vtrue, vfalse, GET_MODE (x),
                                  unsignedp);
  if (target)
    return target;

  /* We might be faced with a situation like:

     x = (reg:M TARGET)
     vtrue = (subreg:M (reg:N VTRUE) BYTE)
     vfalse = (subreg:M (reg:N VFALSE) BYTE)

     We can't do a conditional move in mode M, but it's possible that we
     could do a conditional move in mode N instead and take a subreg of
     the result.

     If we can't create new pseudos, though, don't bother.  */
  if (reload_completed)
    return NULL_RTX;

  if (GET_CODE (vtrue) == SUBREG && GET_CODE (vfalse) == SUBREG)
    {
      rtx reg_vtrue = SUBREG_REG (vtrue);
      rtx reg_vfalse = SUBREG_REG (vfalse);
      unsigned int byte_vtrue = SUBREG_BYTE (vtrue);
      unsigned int byte_vfalse = SUBREG_BYTE (vfalse);
      rtx promoted_target;

      if (GET_MODE (reg_vtrue) != GET_MODE (reg_vfalse)
          || byte_vtrue != byte_vfalse
          || (SUBREG_PROMOTED_VAR_P (vtrue)
              != SUBREG_PROMOTED_VAR_P (vfalse))
          || (SUBREG_PROMOTED_GET (vtrue)
              != SUBREG_PROMOTED_GET (vfalse)))
        return NULL_RTX;

      promoted_target = gen_reg_rtx (GET_MODE (reg_vtrue));

      target = emit_conditional_move (promoted_target, code, cmp_a, cmp_b,
                                      VOIDmode, reg_vtrue, reg_vfalse,
                                      GET_MODE (reg_vtrue), unsignedp);
      /* Nope, couldn't do it in that mode either.  */
      if (!target)
        return NULL_RTX;

      target = gen_rtx_SUBREG (GET_MODE (vtrue), promoted_target, byte_vtrue);
      SUBREG_PROMOTED_VAR_P (target) = SUBREG_PROMOTED_VAR_P (vtrue);
      SUBREG_PROMOTED_SET (target, SUBREG_PROMOTED_GET (vtrue));
      emit_move_insn (x, target);
      return x;
    }
  else
    return NULL_RTX;
#else
  /* We'll never get here, as noce_process_if_block doesn't call the
     functions involved.  Ifdef code, however, should be discouraged
     because it leads to typos in the code not selected.  However,
     emit_conditional_move won't exist either.  */
  return NULL_RTX;
#endif
}

/* Try only simple constants and registers here.  More complex cases
   are handled in noce_try_cmove_arith after noce_try_store_flag_arith
   has had a go at it.  */

static int
noce_try_cmove (struct noce_if_info *if_info)
{
  enum rtx_code code;
  rtx target;
  rtx_insn *seq;

  if ((CONSTANT_P (if_info->a) || register_operand (if_info->a, VOIDmode))
      && (CONSTANT_P (if_info->b) || register_operand (if_info->b, VOIDmode)))
    {
      start_sequence ();

      code = GET_CODE (if_info->cond);
      target = noce_emit_cmove (if_info, if_info->x, code,
                                XEXP (if_info->cond, 0),
                                XEXP (if_info->cond, 1),
                                if_info->a, if_info->b);

      if (target)
        {
          if (target != if_info->x)
            noce_emit_move_insn (if_info->x, target);

          seq = end_ifcvt_sequence (if_info);
          if (!seq)
            return FALSE;

          emit_insn_before_setloc (seq, if_info->jump,
                                   INSN_LOCATION (if_info->insn_a));
          return TRUE;
        }
      else
        {
          end_sequence ();
          return FALSE;
        }
    }

  return FALSE;
}

/* Try more complex cases involving conditional_move.  */

static int
noce_try_cmove_arith (struct noce_if_info *if_info)
{
  rtx a = if_info->a;
  rtx b = if_info->b;
  rtx x = if_info->x;
  rtx orig_a, orig_b;
  rtx_insn *insn_a, *insn_b;
  rtx target;
  int is_mem = 0;
  int insn_cost;
  enum rtx_code code;
  rtx_insn *ifcvt_seq;

  /* A conditional move from two memory sources is equivalent to a
     conditional on their addresses followed by a load.  Don't do this
     early because it'll screw alias analysis.  Note that we've
     already checked for no side effects.  */
  /* ??? FIXME: Magic number 5.  */
  if (cse_not_expected
      && MEM_P (a) && MEM_P (b)
      && MEM_ADDR_SPACE (a) == MEM_ADDR_SPACE (b)
      && if_info->branch_cost >= 5)
    {
      machine_mode address_mode = get_address_mode (a);

      a = XEXP (a, 0);
      b = XEXP (b, 0);
      x = gen_reg_rtx (address_mode);
      is_mem = 1;
    }

  /* ??? We could handle this if we knew that a load from A or B could
     not trap or fault.  This is also true if we've already loaded
     from the address along the path from ENTRY.  */
  else if (may_trap_or_fault_p (a) || may_trap_or_fault_p (b))
    return FALSE;

  /* if (test) x = a + b; else x = c - d;
     => y = a + b;
        x = c - d;
        if (test)
          x = y;
  */

  code = GET_CODE (if_info->cond);
  insn_a = if_info->insn_a;
  insn_b = if_info->insn_b;

  /* Total insn_rtx_cost should be smaller than branch cost.  Exit
     if insn_rtx_cost can't be estimated.  */
  if (insn_a)
    {
      insn_cost
        = insn_rtx_cost (PATTERN (insn_a),
                         optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn_a)));
      if (insn_cost == 0 || insn_cost > COSTS_N_INSNS (if_info->branch_cost))
        return FALSE;
    }
  else
    insn_cost = 0;

  if (insn_b)
    {
      insn_cost
        += insn_rtx_cost (PATTERN (insn_b),
                          optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn_b)));
      if (insn_cost == 0 || insn_cost > COSTS_N_INSNS (if_info->branch_cost))
        return FALSE;
    }

  /* Possibly rearrange operands to make things come out more natural.  */
  if (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN)
    {
      int reversep = 0;
      if (rtx_equal_p (b, x))
        reversep = 1;
      else if (general_operand (b, GET_MODE (b)))
        reversep = 1;

      if (reversep)
        {
          rtx tmp;
          rtx_insn *tmp_insn;
          code = reversed_comparison_code (if_info->cond, if_info->jump);
          tmp = a, a = b, b = tmp;
          tmp_insn = insn_a, insn_a = insn_b, insn_b = tmp_insn;
        }
    }

  start_sequence ();

  orig_a = a;
  orig_b = b;

  /* If either operand is complex, load it into a register first.
     The best way to do this is to copy the original insn.  In this
     way we preserve any clobbers etc that the insn may have had.
     This is of course not possible in the IS_MEM case.  */
  if (! general_operand (a, GET_MODE (a)))
    {
      rtx_insn *insn;

      if (is_mem)
        {
          rtx reg = gen_reg_rtx (GET_MODE (a));
          insn = emit_insn (gen_rtx_SET (VOIDmode, reg, a));
        }
      else if (! insn_a)
        goto end_seq_and_fail;
      else
        {
          a = gen_reg_rtx (GET_MODE (a));
          rtx_insn *copy_of_a = as_a <rtx_insn *> (copy_rtx (insn_a));
          rtx set = single_set (copy_of_a);
          SET_DEST (set) = a;
          insn = emit_insn (PATTERN (copy_of_a));
        }
      if (recog_memoized (insn) < 0)
        goto end_seq_and_fail;
    }
  if (! general_operand (b, GET_MODE (b)))
    {
      rtx pat;
      rtx_insn *last;
      rtx_insn *new_insn;

      if (is_mem)
        {
          rtx reg = gen_reg_rtx (GET_MODE (b));
          pat = gen_rtx_SET (VOIDmode, reg, b);
        }
      else if (! insn_b)
        goto end_seq_and_fail;
      else
        {
          b = gen_reg_rtx (GET_MODE (b));
          rtx_insn *copy_of_insn_b = as_a <rtx_insn *> (copy_rtx (insn_b));
          rtx set = single_set (copy_of_insn_b);
          SET_DEST (set) = b;
          pat = PATTERN (copy_of_insn_b);
        }

      /* If insn to set up A clobbers any registers B depends on, try to
         swap insn that sets up A with the one that sets up B.  If even
         that doesn't help, punt.  */
      last = get_last_insn ();
      if (last && modified_in_p (orig_b, last))
        {
          new_insn = emit_insn_before (pat, get_insns ());
          if (modified_in_p (orig_a, new_insn))
            goto end_seq_and_fail;
        }
      else
        new_insn = emit_insn (pat);

      if (recog_memoized (new_insn) < 0)
        goto end_seq_and_fail;
    }

  target = noce_emit_cmove (if_info, x, code, XEXP (if_info->cond, 0),
                            XEXP (if_info->cond, 1), a, b);

  if (! target)
    goto end_seq_and_fail;

  /* If we're handling the memory-address case from above, emit the
     load now.  */
  if (is_mem)
    {
      rtx mem = gen_rtx_MEM (GET_MODE (if_info->x), target);

      /* Copy over flags as appropriate.  */
      if (MEM_VOLATILE_P (if_info->a) || MEM_VOLATILE_P (if_info->b))
        MEM_VOLATILE_P (mem) = 1;
      if (MEM_ALIAS_SET (if_info->a) == MEM_ALIAS_SET (if_info->b))
        set_mem_alias_set (mem, MEM_ALIAS_SET (if_info->a));
      set_mem_align (mem,
                     MIN (MEM_ALIGN (if_info->a), MEM_ALIGN (if_info->b)));

      gcc_assert (MEM_ADDR_SPACE (if_info->a) == MEM_ADDR_SPACE (if_info->b));
      set_mem_addr_space (mem, MEM_ADDR_SPACE (if_info->a));

      noce_emit_move_insn (if_info->x, mem);
    }
  else if (target != x)
    noce_emit_move_insn (x, target);

  ifcvt_seq = end_ifcvt_sequence (if_info);
  if (!ifcvt_seq)
    return FALSE;

  emit_insn_before_setloc (ifcvt_seq, if_info->jump,
                           INSN_LOCATION (if_info->insn_a));
  return TRUE;

 end_seq_and_fail:
  end_sequence ();
  return FALSE;
}

/* For most cases, the simplified condition we found is the best
   choice, but this is not the case for the min/max/abs transforms.
   For these we wish to know that it is A or B in the condition.  */

static rtx
noce_get_alt_condition (struct noce_if_info *if_info, rtx target,
                        rtx_insn **earliest)
{
  rtx cond, set;
  rtx_insn *insn;
  int reverse;
  int allow_cc_mode = false;
#if HAVE_cbranchcc4
  allow_cc_mode = true;
#endif

  /* If target is already mentioned in the known condition, return it.  */
  if (reg_mentioned_p (target, if_info->cond))
    {
      *earliest = if_info->cond_earliest;
      return if_info->cond;
    }

  set = pc_set (if_info->jump);
  cond = XEXP (SET_SRC (set), 0);
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (if_info->jump);
  if (if_info->then_else_reversed)
    reverse = !reverse;

  /* If we're looking for a constant, try to make the conditional
     have that constant in it.  There are two reasons why it may
     not have the constant we want:

     1. GCC may have needed to put the constant in a register, because
        the target can't compare directly against that constant.  For
        this case, we look for a SET immediately before the comparison
        that puts a constant in that register.

     2. GCC may have canonicalized the conditional, for example
        replacing "if x < 4" with "if x <= 3".  We can undo that (or
        make equivalent types of changes) to get the constants we need
        if they're off by one in the right direction.  */

  if (CONST_INT_P (target))
    {
      enum rtx_code code = GET_CODE (if_info->cond);
      rtx op_a = XEXP (if_info->cond, 0);
      rtx op_b = XEXP (if_info->cond, 1);
      rtx prev_insn;

      /* First, look to see if we put a constant in a register.  */
      prev_insn = prev_nonnote_insn (if_info->cond_earliest);
      if (prev_insn
          && BLOCK_FOR_INSN (prev_insn)
             == BLOCK_FOR_INSN (if_info->cond_earliest)
          && INSN_P (prev_insn)
          && GET_CODE (PATTERN (prev_insn)) == SET)
        {
          rtx src = find_reg_equal_equiv_note (prev_insn);
          if (!src)
            src = SET_SRC (PATTERN (prev_insn));
          if (CONST_INT_P (src))
            {
              if (rtx_equal_p (op_a, SET_DEST (PATTERN (prev_insn))))
                op_a = src;
              else if (rtx_equal_p (op_b, SET_DEST (PATTERN (prev_insn))))
                op_b = src;

              if (CONST_INT_P (op_a))
                {
                  rtx tmp = op_a;
                  op_a = op_b;
                  op_b = tmp;
                  code = swap_condition (code);
                }
            }
        }

      /* Now, look to see if we can get the right constant by
         adjusting the conditional.  */
      if (CONST_INT_P (op_b))
        {
          HOST_WIDE_INT desired_val = INTVAL (target);
          HOST_WIDE_INT actual_val = INTVAL (op_b);

          switch (code)
            {
            case LT:
              if (actual_val == desired_val + 1)
                {
                  code = LE;
                  op_b = GEN_INT (desired_val);
                }
              break;
            case LE:
              if (actual_val == desired_val - 1)
                {
                  code = LT;
                  op_b = GEN_INT (desired_val);
                }
              break;
            case GT:
              if (actual_val == desired_val - 1)
                {
                  code = GE;
                  op_b = GEN_INT (desired_val);
                }
              break;
            case GE:
              if (actual_val == desired_val + 1)
                {
                  code = GT;
                  op_b = GEN_INT (desired_val);
                }
              break;
            default:
              break;
            }
        }

      /* If we made any changes, generate a new conditional that is
         equivalent to what we started with, but has the right
         constants in it.  */
      if (code != GET_CODE (if_info->cond)
          || op_a != XEXP (if_info->cond, 0)
          || op_b != XEXP (if_info->cond, 1))
        {
          cond = gen_rtx_fmt_ee (code, GET_MODE (cond), op_a, op_b);
          *earliest = if_info->cond_earliest;
          return cond;
        }
    }

  cond = canonicalize_condition (if_info->jump, cond, reverse,
                                 earliest, target, allow_cc_mode, true);
  if (! cond || ! reg_mentioned_p (target, cond))
    return NULL;

  /* We almost certainly searched back to a different place.
     Need to re-verify correct lifetimes.  */

  /* X may not be mentioned in the range (cond_earliest, jump].  */
  for (insn = if_info->jump; insn != *earliest; insn = PREV_INSN (insn))
    if (INSN_P (insn) && reg_overlap_mentioned_p (if_info->x, PATTERN (insn)))
      return NULL;

  /* A and B may not be modified in the range [cond_earliest, jump).  */
  for (insn = *earliest; insn != if_info->jump; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && (modified_in_p (if_info->a, insn)
            || modified_in_p (if_info->b, insn)))
      return NULL;

  return cond;
}
1934
1935 /* Convert "if (a < b) x = a; else x = b;" to "x = min(a, b);", etc. */
1936
1937 static int
1938 noce_try_minmax (struct noce_if_info *if_info)
1939 {
1940 rtx cond, target;
1941 rtx_insn *earliest, *seq;
1942 enum rtx_code code, op;
1943 int unsignedp;
1944
1945 /* ??? Reject modes with NaNs or signed zeros since we don't know how
1946 they will be resolved with an SMIN/SMAX. It wouldn't be too hard
1947 to get the target to tell us... */
1948 if (HONOR_SIGNED_ZEROS (GET_MODE (if_info->x))
1949 || HONOR_NANS (GET_MODE (if_info->x)))
1950 return FALSE;
1951
1952 cond = noce_get_alt_condition (if_info, if_info->a, &earliest);
1953 if (!cond)
1954 return FALSE;
1955
1956 /* Verify the condition is of the form we expect, and canonicalize
1957 the comparison code. */
1958 code = GET_CODE (cond);
1959 if (rtx_equal_p (XEXP (cond, 0), if_info->a))
1960 {
1961 if (! rtx_equal_p (XEXP (cond, 1), if_info->b))
1962 return FALSE;
1963 }
1964 else if (rtx_equal_p (XEXP (cond, 1), if_info->a))
1965 {
1966 if (! rtx_equal_p (XEXP (cond, 0), if_info->b))
1967 return FALSE;
1968 code = swap_condition (code);
1969 }
1970 else
1971 return FALSE;
1972
1973 /* Determine what sort of operation this is. Note that the code is for
1974 a taken branch, so the code->operation mapping appears backwards. */
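   /* Illustrative example: the source form "if (a < b) x = a; else x = b;"
      computes a signed minimum, but the RTL branch that skips the THEN
      assignment is taken when a >= b, so the condition recovered above
      is GE; hence GE maps to SMIN below, not SMAX.  */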
1975 switch (code)
1976 {
1977 case LT:
1978 case LE:
1979 case UNLT:
1980 case UNLE:
1981 op = SMAX;
1982 unsignedp = 0;
1983 break;
1984 case GT:
1985 case GE:
1986 case UNGT:
1987 case UNGE:
1988 op = SMIN;
1989 unsignedp = 0;
1990 break;
1991 case LTU:
1992 case LEU:
1993 op = UMAX;
1994 unsignedp = 1;
1995 break;
1996 case GTU:
1997 case GEU:
1998 op = UMIN;
1999 unsignedp = 1;
2000 break;
2001 default:
2002 return FALSE;
2003 }
2004
2005 start_sequence ();
2006
2007 target = expand_simple_binop (GET_MODE (if_info->x), op,
2008 if_info->a, if_info->b,
2009 if_info->x, unsignedp, OPTAB_WIDEN);
2010 if (! target)
2011 {
2012 end_sequence ();
2013 return FALSE;
2014 }
2015 if (target != if_info->x)
2016 noce_emit_move_insn (if_info->x, target);
2017
2018 seq = end_ifcvt_sequence (if_info);
2019 if (!seq)
2020 return FALSE;
2021
2022 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2023 if_info->cond = cond;
2024 if_info->cond_earliest = earliest;
2025
2026 return TRUE;
2027 }
2028
2029 /* Convert "if (a < 0) x = -a; else x = a;" to "x = abs(a);",
2030 "if (a < 0) x = ~a; else x = a;" to "x = one_cmpl_abs(a);",
2031 etc. */
2032
2033 static int
2034 noce_try_abs (struct noce_if_info *if_info)
2035 {
2036 rtx cond, target, a, b, c;
2037 rtx_insn *earliest, *seq;
2038 int negate;
2039 bool one_cmpl = false;
2040
2041 /* Reject modes with signed zeros. */
2042 if (HONOR_SIGNED_ZEROS (GET_MODE (if_info->x)))
2043 return FALSE;
2044
2045 /* Recognize A and B as constituting an ABS or NABS. The canonical
2046 form is a branch around the negation, taken when the object is the
2047 first operand of a comparison against 0 that evaluates to true. */
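  /* Illustrative example: "if (a < 0) x = -a; else x = a;" arrives here
     with A = (neg a) and B = a, so the first arm matches with NEGATE
     clear; the comparison of B against zero checked below then decides
     between ABS and NABS.  */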
2048 a = if_info->a;
2049 b = if_info->b;
2050 if (GET_CODE (a) == NEG && rtx_equal_p (XEXP (a, 0), b))
2051 negate = 0;
2052 else if (GET_CODE (b) == NEG && rtx_equal_p (XEXP (b, 0), a))
2053 {
2054 c = a; a = b; b = c;
2055 negate = 1;
2056 }
2057 else if (GET_CODE (a) == NOT && rtx_equal_p (XEXP (a, 0), b))
2058 {
2059 negate = 0;
2060 one_cmpl = true;
2061 }
2062 else if (GET_CODE (b) == NOT && rtx_equal_p (XEXP (b, 0), a))
2063 {
2064 c = a; a = b; b = c;
2065 negate = 1;
2066 one_cmpl = true;
2067 }
2068 else
2069 return FALSE;
2070
2071 cond = noce_get_alt_condition (if_info, b, &earliest);
2072 if (!cond)
2073 return FALSE;
2074
2075 /* Verify the condition is of the form we expect. */
2076 if (rtx_equal_p (XEXP (cond, 0), b))
2077 c = XEXP (cond, 1);
2078 else if (rtx_equal_p (XEXP (cond, 1), b))
2079 {
2080 c = XEXP (cond, 0);
2081 negate = !negate;
2082 }
2083 else
2084 return FALSE;
2085
2086 /* Verify that C is zero. Search one step backward for a
2087 REG_EQUAL note or a simple source if necessary. */
2088 if (REG_P (c))
2089 {
2090 rtx set;
2091 rtx_insn *insn = prev_nonnote_insn (earliest);
2092 if (insn
2093 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (earliest)
2094 && (set = single_set (insn))
2095 && rtx_equal_p (SET_DEST (set), c))
2096 {
2097 rtx note = find_reg_equal_equiv_note (insn);
2098 if (note)
2099 c = XEXP (note, 0);
2100 else
2101 c = SET_SRC (set);
2102 }
2103 else
2104 return FALSE;
2105 }
2106 if (MEM_P (c)
2107 && GET_CODE (XEXP (c, 0)) == SYMBOL_REF
2108 && CONSTANT_POOL_ADDRESS_P (XEXP (c, 0)))
2109 c = get_pool_constant (XEXP (c, 0));
2110
2111 /* Work around funny ideas get_condition has wrt canonicalization.
2112 Note that these rtx constants are known to be CONST_INT, and
2113 therefore imply integer comparisons. */
2114 if (c == constm1_rtx && GET_CODE (cond) == GT)
2115 ;
2116 else if (c == const1_rtx && GET_CODE (cond) == LT)
2117 ;
2118 else if (c != CONST0_RTX (GET_MODE (b)))
2119 return FALSE;
2120
2121 /* Determine what sort of operation this is. */
2122 switch (GET_CODE (cond))
2123 {
2124 case LT:
2125 case LE:
2126 case UNLT:
2127 case UNLE:
2128 negate = !negate;
2129 break;
2130 case GT:
2131 case GE:
2132 case UNGT:
2133 case UNGE:
2134 break;
2135 default:
2136 return FALSE;
2137 }
2138
2139 start_sequence ();
2140 if (one_cmpl)
2141 target = expand_one_cmpl_abs_nojump (GET_MODE (if_info->x), b,
2142 if_info->x);
2143 else
2144 target = expand_abs_nojump (GET_MODE (if_info->x), b, if_info->x, 1);
2145
2146 /* ??? It's a quandary whether cmove would be better here, especially
2147 for integers. Perhaps combine will clean things up. */
2148 if (target && negate)
2149 {
2150 if (one_cmpl)
2151 target = expand_simple_unop (GET_MODE (target), NOT, target,
2152 if_info->x, 0);
2153 else
2154 target = expand_simple_unop (GET_MODE (target), NEG, target,
2155 if_info->x, 0);
2156 }
2157
2158 if (! target)
2159 {
2160 end_sequence ();
2161 return FALSE;
2162 }
2163
2164 if (target != if_info->x)
2165 noce_emit_move_insn (if_info->x, target);
2166
2167 seq = end_ifcvt_sequence (if_info);
2168 if (!seq)
2169 return FALSE;
2170
2171 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2172 if_info->cond = cond;
2173 if_info->cond_earliest = earliest;
2174
2175 return TRUE;
2176 }
2177
2178 /* Convert "if (m < 0) x = b; else x = 0;" to "x = (m >> C) & b;". */
2179
2180 static int
2181 noce_try_sign_mask (struct noce_if_info *if_info)
2182 {
2183 rtx cond, t, m, c;
2184 rtx_insn *seq;
2185 machine_mode mode;
2186 enum rtx_code code;
2187 bool t_unconditional;
2188
2189 cond = if_info->cond;
2190 code = GET_CODE (cond);
2191 m = XEXP (cond, 0);
2192 c = XEXP (cond, 1);
2193
2194 t = NULL_RTX;
2195 if (if_info->a == const0_rtx)
2196 {
2197 if ((code == LT && c == const0_rtx)
2198 || (code == LE && c == constm1_rtx))
2199 t = if_info->b;
2200 }
2201 else if (if_info->b == const0_rtx)
2202 {
2203 if ((code == GE && c == const0_rtx)
2204 || (code == GT && c == constm1_rtx))
2205 t = if_info->a;
2206 }
2207
2208 if (! t || side_effects_p (t))
2209 return FALSE;
2210
2211 /* We currently don't handle different modes. */
2212 mode = GET_MODE (t);
2213 if (GET_MODE (m) != mode)
2214 return FALSE;
2215
2216 /* This is only profitable if T is unconditionally executed/evaluated in the
2217 original insn sequence or T is cheap. The former happens if B is the
2218 non-zero (T) value and if INSN_B was taken from TEST_BB, or there was no
2219 INSN_B which can happen for e.g. conditional stores to memory. For the
2220 cost computation use the block TEST_BB where the evaluation will end up
2221 after the transformation. */
2222 t_unconditional =
2223 (t == if_info->b
2224 && (if_info->insn_b == NULL_RTX
2225 || BLOCK_FOR_INSN (if_info->insn_b) == if_info->test_bb));
2226 if (!(t_unconditional
2227 || (set_src_cost (t, optimize_bb_for_speed_p (if_info->test_bb))
2228 < COSTS_N_INSNS (2))))
2229 return FALSE;
2230
2231 start_sequence ();
2232 /* Use emit_store_flag to generate "m < 0 ? -1 : 0" instead of expanding
2233 "(signed) m >> 31" directly. This benefits targets with specialized
2234 insns to obtain the signmask, but still uses ashr_optab otherwise. */
2235 m = emit_store_flag (gen_reg_rtx (mode), LT, m, const0_rtx, mode, 0, -1);
2236 t = m ? expand_binop (mode, and_optab, m, t, NULL_RTX, 0, OPTAB_DIRECT)
2237 : NULL_RTX;
2238
2239 if (!t)
2240 {
2241 end_sequence ();
2242 return FALSE;
2243 }
2244
2245 noce_emit_move_insn (if_info->x, t);
2246
2247 seq = end_ifcvt_sequence (if_info);
2248 if (!seq)
2249 return FALSE;
2250
2251 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2252 return TRUE;
2253 }
2254
2255
2256 /* Optimize away "if (x & C) x |= C" and similar bit manipulation
2257 transformations. */
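/* For example, "if (x & 8) x |= 8;" is a no-op and is removed entirely,
   while "if (!(x & 8)) x |= 8;" simplifies to the unconditional
   "x |= 8".  Only single-bit constants are handled; the test must be
   recognizable as a ZERO_EXTRACT of one bit.  */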
2258
2259 static int
2260 noce_try_bitop (struct noce_if_info *if_info)
2261 {
2262 rtx cond, x, a, result;
2263 rtx_insn *seq;
2264 machine_mode mode;
2265 enum rtx_code code;
2266 int bitnum;
2267
2268 x = if_info->x;
2269 cond = if_info->cond;
2270 code = GET_CODE (cond);
2271
2272 /* Check for no else condition. */
2273 if (! rtx_equal_p (x, if_info->b))
2274 return FALSE;
2275
2276 /* Check for a suitable condition. */
2277 if (code != NE && code != EQ)
2278 return FALSE;
2279 if (XEXP (cond, 1) != const0_rtx)
2280 return FALSE;
2281 cond = XEXP (cond, 0);
2282
2283 /* ??? We could also handle AND here. */
2284 if (GET_CODE (cond) == ZERO_EXTRACT)
2285 {
2286 if (XEXP (cond, 1) != const1_rtx
2287 || !CONST_INT_P (XEXP (cond, 2))
2288 || ! rtx_equal_p (x, XEXP (cond, 0)))
2289 return FALSE;
2290 bitnum = INTVAL (XEXP (cond, 2));
2291 mode = GET_MODE (x);
2292 if (BITS_BIG_ENDIAN)
2293 bitnum = GET_MODE_BITSIZE (mode) - 1 - bitnum;
2294 if (bitnum < 0 || bitnum >= HOST_BITS_PER_WIDE_INT)
2295 return FALSE;
2296 }
2297 else
2298 return FALSE;
2299
2300 a = if_info->a;
2301 if (GET_CODE (a) == IOR || GET_CODE (a) == XOR)
2302 {
2303 /* Check for "if (X & C) x = x op C". */
2304 if (! rtx_equal_p (x, XEXP (a, 0))
2305 || !CONST_INT_P (XEXP (a, 1))
2306 || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode))
2307 != (unsigned HOST_WIDE_INT) 1 << bitnum)
2308 return FALSE;
2309
2310 /* if ((x & C) == 0) x |= C; is transformed to x |= C. */
2311 /* if ((x & C) != 0) x |= C; is transformed to nothing. */
2312 if (GET_CODE (a) == IOR)
2313 result = (code == NE) ? a : NULL_RTX;
2314 else if (code == NE)
2315 {
2316 /* if ((x & C) == 0) x ^= C; is transformed to x |= C. */
2317 result = gen_int_mode ((HOST_WIDE_INT) 1 << bitnum, mode);
2318 result = simplify_gen_binary (IOR, mode, x, result);
2319 }
2320 else
2321 {
2322 /* if ((x & C) != 0) x ^= C; is transformed to x &= ~C. */
2323 result = gen_int_mode (~((HOST_WIDE_INT) 1 << bitnum), mode);
2324 result = simplify_gen_binary (AND, mode, x, result);
2325 }
2326 }
2327 else if (GET_CODE (a) == AND)
2328 {
2329 /* Check for "if (X & C) x &= ~C". */
2330 if (! rtx_equal_p (x, XEXP (a, 0))
2331 || !CONST_INT_P (XEXP (a, 1))
2332 || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode))
2333 != (~((HOST_WIDE_INT) 1 << bitnum) & GET_MODE_MASK (mode)))
2334 return FALSE;
2335
2336 /* if ((x & C) == 0) x &= ~C; is transformed to nothing. */
2337 /* if ((x & C) != 0) x &= ~C; is transformed to x &= ~C. */
2338 result = (code == EQ) ? a : NULL_RTX;
2339 }
2340 else
2341 return FALSE;
2342
2343 if (result)
2344 {
2345 start_sequence ();
2346 noce_emit_move_insn (x, result);
2347 seq = end_ifcvt_sequence (if_info);
2348 if (!seq)
2349 return FALSE;
2350
2351 emit_insn_before_setloc (seq, if_info->jump,
2352 INSN_LOCATION (if_info->insn_a));
2353 }
2354 return TRUE;
2355 }
2356
2357
2358 /* Similar to get_condition, only the resulting condition must be
2359 valid at JUMP, instead of at EARLIEST.
2360
2361 If THEN_ELSE_REVERSED is true, the fallthrough does not go to the
2362 THEN block of the caller, and we have to reverse the condition. */
2363
2364 static rtx
2365 noce_get_condition (rtx_insn *jump, rtx_insn **earliest, bool then_else_reversed)
2366 {
2367 rtx cond, set, tmp;
2368 bool reverse;
2369 int allow_cc_mode = false;
2370 #if HAVE_cbranchcc4
2371 allow_cc_mode = true;
2372 #endif
2373
2374 if (! any_condjump_p (jump))
2375 return NULL_RTX;
2376
2377 set = pc_set (jump);
2378
2379 /* If this branches to JUMP_LABEL when the condition is false,
2380 reverse the condition. */
2381 reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
2382 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump));
2383
2384 /* We may have to reverse because the caller's if block is not canonical,
2385 i.e. the THEN block isn't the fallthrough block for the TEST block
2386 (see find_if_header). */
2387 if (then_else_reversed)
2388 reverse = !reverse;
2389
2390 /* If the condition variable is a register and is MODE_INT, accept it. */
2391
2392 cond = XEXP (SET_SRC (set), 0);
2393 tmp = XEXP (cond, 0);
2394 if (REG_P (tmp) && GET_MODE_CLASS (GET_MODE (tmp)) == MODE_INT
2395 && (GET_MODE (tmp) != BImode
2396 || !targetm.small_register_classes_for_mode_p (BImode)))
2397 {
2398 *earliest = jump;
2399
2400 if (reverse)
2401 cond = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
2402 GET_MODE (cond), tmp, XEXP (cond, 1));
2403 return cond;
2404 }
2405
2406 /* Otherwise, fall back on canonicalize_condition to do the dirty
2407 work of manipulating MODE_CC values and COMPARE rtx codes. */
2408 tmp = canonicalize_condition (jump, cond, reverse, earliest,
2409 NULL_RTX, allow_cc_mode, true);
2410
2411 /* We don't handle side-effects in the condition; doing so would require
2412 handling REG_INC notes and ensuring no duplicate conditions are emitted. */
2413 if (tmp != NULL_RTX && side_effects_p (tmp))
2414 return NULL_RTX;
2415
2416 return tmp;
2417 }
2418
2419 /* Return true if OP is ok for if-then-else processing. */
2420
2421 static int
2422 noce_operand_ok (const_rtx op)
2423 {
2424 if (side_effects_p (op))
2425 return FALSE;
2426
2427 /* We special-case memories, so handle any of them with
2428 no address side effects. */
2429 if (MEM_P (op))
2430 return ! side_effects_p (XEXP (op, 0));
2431
2432 return ! may_trap_p (op);
2433 }
2434
2435 /* Return true if a write into MEM may trap or fault. */
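/* For example, a store through a SYMBOL_REF whose decl lives in a
   read-only section is reported as faulting, while a constant offset
   from an otherwise safe base address is walked through via the PLUS
   case below.  */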
2436
2437 static bool
2438 noce_mem_write_may_trap_or_fault_p (const_rtx mem)
2439 {
2440 rtx addr;
2441
2442 if (MEM_READONLY_P (mem))
2443 return true;
2444
2445 if (may_trap_or_fault_p (mem))
2446 return true;
2447
2448 addr = XEXP (mem, 0);
2449
2450 /* Call target hook to avoid the effects of -fpic etc. */
2451 addr = targetm.delegitimize_address (addr);
2452
2453 while (addr)
2454 switch (GET_CODE (addr))
2455 {
2456 case CONST:
2457 case PRE_DEC:
2458 case PRE_INC:
2459 case POST_DEC:
2460 case POST_INC:
2461 case POST_MODIFY:
2462 addr = XEXP (addr, 0);
2463 break;
2464 case LO_SUM:
2465 case PRE_MODIFY:
2466 addr = XEXP (addr, 1);
2467 break;
2468 case PLUS:
2469 if (CONST_INT_P (XEXP (addr, 1)))
2470 addr = XEXP (addr, 0);
2471 else
2472 return false;
2473 break;
2474 case LABEL_REF:
2475 return true;
2476 case SYMBOL_REF:
2477 if (SYMBOL_REF_DECL (addr)
2478 && decl_readonly_section (SYMBOL_REF_DECL (addr), 0))
2479 return true;
2480 return false;
2481 default:
2482 return false;
2483 }
2484
2485 return false;
2486 }
2487
2488 /* Return whether we can use store speculation for MEM. TOP_BB is the
2489 basic block above the conditional block where we are considering
2490 doing the speculative store. We look for whether MEM is set
2491 unconditionally later in the function. */
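/* For instance, given "if (c) *p = a;" followed later by an
   unconditional "*p = b;", the later store proves that *p is writable,
   so hoisting the conditional store cannot introduce a new fault.  The
   walk below follows post-dominators and gives up at anything that
   might act as a memory barrier.  */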
2492
2493 static bool
2494 noce_can_store_speculate_p (basic_block top_bb, const_rtx mem)
2495 {
2496 basic_block dominator;
2497
2498 for (dominator = get_immediate_dominator (CDI_POST_DOMINATORS, top_bb);
2499 dominator != NULL;
2500 dominator = get_immediate_dominator (CDI_POST_DOMINATORS, dominator))
2501 {
2502 rtx_insn *insn;
2503
2504 FOR_BB_INSNS (dominator, insn)
2505 {
2506 /* If we see something that might be a memory barrier, we
2507 have to stop looking. Even if the MEM is set later in
2508 the function, we still don't want to set it
2509 unconditionally before the barrier. */
2510 if (INSN_P (insn)
2511 && (volatile_insn_p (PATTERN (insn))
2512 || (CALL_P (insn) && (!RTL_CONST_CALL_P (insn)))))
2513 return false;
2514
2515 if (memory_must_be_modified_in_insn_p (mem, insn))
2516 return true;
2517 if (modified_in_p (XEXP (mem, 0), insn))
2518 return false;
2519
2520 }
2521 }
2522
2523 return false;
2524 }
2525
2526 /* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
2527 it without using conditional execution. Return TRUE if we were successful
2528 at converting the block. */
2529
2530 static int
2531 noce_process_if_block (struct noce_if_info *if_info)
2532 {
2533 basic_block test_bb = if_info->test_bb; /* test block */
2534 basic_block then_bb = if_info->then_bb; /* THEN */
2535 basic_block else_bb = if_info->else_bb; /* ELSE or NULL */
2536 basic_block join_bb = if_info->join_bb; /* JOIN */
2537 rtx_insn *jump = if_info->jump;
2538 rtx cond = if_info->cond;
2539 rtx_insn *insn_a, *insn_b;
2540 rtx set_a, set_b;
2541 rtx orig_x, x, a, b;
2542
2543 /* We're looking for patterns of the form
2544
2545 (1) if (...) x = a; else x = b;
2546 (2) x = b; if (...) x = a;
2547 (3) if (...) x = a; // as if with an initial x = x.
2548
2549 The latter patterns are only profitable when jumps are expensive.
2550
2551 ??? For future expansion, look for multiple X in such patterns. */
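  /* Note that in form (3) there is no explicit ELSE value; the code
     below models the implicit "else x = x;" by using X itself for B
     when no SET_B is found.  */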
2552
2553 /* Look for one of the potential sets. */
2554 insn_a = first_active_insn (then_bb);
2555 if (! insn_a
2556 || insn_a != last_active_insn (then_bb, FALSE)
2557 || (set_a = single_set (insn_a)) == NULL_RTX)
2558 return FALSE;
2559
2560 x = SET_DEST (set_a);
2561 a = SET_SRC (set_a);
2562
2563 /* Look for the other potential set. Make sure we've got equivalent
2564 destinations. */
2565 /* ??? This is overconservative. Storing to two different mems is
2566 as easy as conditionally computing the address. Storing to a
2567 single mem merely requires a scratch memory to use as one of the
2568 destination addresses; often the memory immediately below the
2569 stack pointer is available for this. */
2570 set_b = NULL_RTX;
2571 if (else_bb)
2572 {
2573 insn_b = first_active_insn (else_bb);
2574 if (! insn_b
2575 || insn_b != last_active_insn (else_bb, FALSE)
2576 || (set_b = single_set (insn_b)) == NULL_RTX
2577 || ! rtx_interchangeable_p (x, SET_DEST (set_b)))
2578 return FALSE;
2579 }
2580 else
2581 {
2582 insn_b = prev_nonnote_nondebug_insn (if_info->cond_earliest);
2583 /* We're going to be moving the evaluation of B down from above
2584 COND_EARLIEST to JUMP. Make sure the relevant data is still
2585 intact. */
2586 if (! insn_b
2587 || BLOCK_FOR_INSN (insn_b) != BLOCK_FOR_INSN (if_info->cond_earliest)
2588 || !NONJUMP_INSN_P (insn_b)
2589 || (set_b = single_set (insn_b)) == NULL_RTX
2590 || ! rtx_interchangeable_p (x, SET_DEST (set_b))
2591 || ! noce_operand_ok (SET_SRC (set_b))
2592 || reg_overlap_mentioned_p (x, SET_SRC (set_b))
2593 || modified_between_p (SET_SRC (set_b), insn_b, jump)
2594 /* Avoid extending the lifetime of hard registers on small
2595 register class machines. */
2596 || (REG_P (SET_SRC (set_b))
2597 && HARD_REGISTER_P (SET_SRC (set_b))
2598 && targetm.small_register_classes_for_mode_p
2599 (GET_MODE (SET_SRC (set_b))))
2600 /* Likewise with X. In particular this can happen when
2601 noce_get_condition looks farther back in the instruction
2602 stream than one might expect. */
2603 || reg_overlap_mentioned_p (x, cond)
2604 || reg_overlap_mentioned_p (x, a)
2605 || modified_between_p (x, insn_b, jump))
2606 {
2607 insn_b = NULL;
2608 set_b = NULL_RTX;
2609 }
2610 }
2611
2612 /* If x has side effects then only the if-then-else form is safe to
2613 convert. But even in that case we would need to restore any notes
2614 (such as REG_INC) at the end. That can be tricky if
2615 noce_emit_move_insn expands to more than one insn, so disable the
2616 optimization entirely for now if there are side effects. */
2617 if (side_effects_p (x))
2618 return FALSE;
2619
2620 b = (set_b ? SET_SRC (set_b) : x);
2621
2622 /* Only operate on register destinations, and even then avoid extending
2623 the lifetime of hard registers on small register class machines. */
2624 orig_x = x;
2625 if (!REG_P (x)
2626 || (HARD_REGISTER_P (x)
2627 && targetm.small_register_classes_for_mode_p (GET_MODE (x))))
2628 {
2629 if (GET_MODE (x) == BLKmode)
2630 return FALSE;
2631
2632 if (GET_CODE (x) == ZERO_EXTRACT
2633 && (!CONST_INT_P (XEXP (x, 1))
2634 || !CONST_INT_P (XEXP (x, 2))))
2635 return FALSE;
2636
2637 x = gen_reg_rtx (GET_MODE (GET_CODE (x) == STRICT_LOW_PART
2638 ? XEXP (x, 0) : x));
2639 }
2640
2641 /* Don't operate on sources that may trap or are volatile. */
2642 if (! noce_operand_ok (a) || ! noce_operand_ok (b))
2643 return FALSE;
2644
2645 retry:
2646 /* Set up the info block for our subroutines. */
2647 if_info->insn_a = insn_a;
2648 if_info->insn_b = insn_b;
2649 if_info->x = x;
2650 if_info->a = a;
2651 if_info->b = b;
2652
2653 /* Try optimizations in some approximation of a useful order. */
2654 /* ??? Should first look to see if X is live incoming at all. If it
2655 isn't, we don't need anything but an unconditional set. */
2656
2657 /* Look and see if A and B are really the same. Avoid creating silly
2658 cmove constructs that no one will fix up later. */
2659 if (rtx_interchangeable_p (a, b))
2660 {
2661 /* If we have an INSN_B, we don't have to create any new rtl. Just
2662 move the instruction that we already have. If we don't have an
2663 INSN_B, that means that A == X, and we've got a noop move. In
2664 that case don't do anything and let the code below delete INSN_A. */
2665 if (insn_b && else_bb)
2666 {
2667 rtx note;
2668
2669 if (else_bb && insn_b == BB_END (else_bb))
2670 BB_END (else_bb) = PREV_INSN (insn_b);
2671 reorder_insns (insn_b, insn_b, PREV_INSN (jump));
2672
2673 /* If there was a REG_EQUAL note, delete it since it may have been
2674 true due to this insn being after a jump. */
2675 if ((note = find_reg_note (insn_b, REG_EQUAL, NULL_RTX)) != 0)
2676 remove_note (insn_b, note);
2677
2678 insn_b = NULL;
2679 }
2680 /* If we have "x = b; if (...) x = a;", and x has side-effects, then
2681 x must be executed twice. */
2682 else if (insn_b && side_effects_p (orig_x))
2683 return FALSE;
2684
2685 x = orig_x;
2686 goto success;
2687 }
2688
2689 if (!set_b && MEM_P (orig_x))
2690 {
2691 /* Disallow the "if (...) x = a;" form (implicit "else x = x;")
2692 for optimizations if writing to x may trap or fault,
2693 i.e. it's a memory other than a static var or a stack slot,
2694 is misaligned on strict-alignment machines, or is read-only. If
2695 x is a read-only memory, then the program is valid only if we
2696 avoid the store into it. If there are stores on both the
2697 THEN and ELSE arms, then we can go ahead with the conversion;
2698 either the program is broken, or the condition is always
2699 false such that the other memory is selected. */
2700 if (noce_mem_write_may_trap_or_fault_p (orig_x))
2701 return FALSE;
2702
2703 /* Avoid store speculation: given "if (...) x = a" where x is a
2704 MEM, we only want to do the store if x is always set
2705 somewhere in the function. This avoids cases like
2706 if (pthread_mutex_trylock(mutex))
2707 ++global_variable;
2708 where we only want global_variable to be changed if the mutex
2709 is held. FIXME: This should ideally be expressed directly in
2710 RTL somehow. */
2711 if (!noce_can_store_speculate_p (test_bb, orig_x))
2712 return FALSE;
2713 }
2714
2715 if (noce_try_move (if_info))
2716 goto success;
2717 if (noce_try_store_flag (if_info))
2718 goto success;
2719 if (noce_try_bitop (if_info))
2720 goto success;
2721 if (noce_try_minmax (if_info))
2722 goto success;
2723 if (noce_try_abs (if_info))
2724 goto success;
2725 if (HAVE_conditional_move
2726 && noce_try_cmove (if_info))
2727 goto success;
2728 if (! targetm.have_conditional_execution ())
2729 {
2730 if (noce_try_store_flag_constants (if_info))
2731 goto success;
2732 if (noce_try_addcc (if_info))
2733 goto success;
2734 if (noce_try_store_flag_mask (if_info))
2735 goto success;
2736 if (HAVE_conditional_move
2737 && noce_try_cmove_arith (if_info))
2738 goto success;
2739 if (noce_try_sign_mask (if_info))
2740 goto success;
2741 }
2742
2743 if (!else_bb && set_b)
2744 {
2745 insn_b = NULL;
2746 set_b = NULL_RTX;
2747 b = orig_x;
2748 goto retry;
2749 }
2750
2751 return FALSE;
2752
2753 success:
2754
2755 /* If we used a temporary, fix it up now. */
2756 if (orig_x != x)
2757 {
2758 rtx_insn *seq;
2759
2760 start_sequence ();
2761 noce_emit_move_insn (orig_x, x);
2762 seq = get_insns ();
2763 set_used_flags (orig_x);
2764 unshare_all_rtl_in_chain (seq);
2765 end_sequence ();
2766
2767 emit_insn_before_setloc (seq, BB_END (test_bb), INSN_LOCATION (insn_a));
2768 }
2769
2770 /* The original THEN and ELSE blocks may now be removed. The test block
2771 must now jump to the join block. If the test block and the join block
2772 can be merged, do so. */
2773 if (else_bb)
2774 {
2775 delete_basic_block (else_bb);
2776 num_true_changes++;
2777 }
2778 else
2779 remove_edge (find_edge (test_bb, join_bb));
2780
2781 remove_edge (find_edge (then_bb, join_bb));
2782 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
2783 delete_basic_block (then_bb);
2784 num_true_changes++;
2785
2786 if (can_merge_blocks_p (test_bb, join_bb))
2787 {
2788 merge_blocks (test_bb, join_bb);
2789 num_true_changes++;
2790 }
2791
2792 num_updated_if_blocks++;
2793 return TRUE;
2794 }
2795
2796 /* Check whether a block is suitable for conditional move conversion.
2797 Every insn must be a simple set of a register to a constant or a
2798 register. For each assignment, store the value in the pointer map
2799 VALS, keyed by register pointer, then store the register
2800 pointer in REGS. COND is the condition we will test. */
2801
2802 static int
2803 check_cond_move_block (basic_block bb,
2804 hash_map<rtx, rtx> *vals,
2805 vec<rtx> *regs,
2806 rtx cond)
2807 {
2808 rtx_insn *insn;
2809
2810 /* We can only handle simple jumps at the end of the basic block.
2811 It is almost impossible to update the CFG otherwise. */
2812 insn = BB_END (bb);
2813 if (JUMP_P (insn) && !onlyjump_p (insn))
2814 return FALSE;
2815
2816 FOR_BB_INSNS (bb, insn)
2817 {
2818 rtx set, dest, src;
2819
2820 if (!NONDEBUG_INSN_P (insn) || JUMP_P (insn))
2821 continue;
2822 set = single_set (insn);
2823 if (!set)
2824 return FALSE;
2825
2826 dest = SET_DEST (set);
2827 src = SET_SRC (set);
2828 if (!REG_P (dest)
2829 || (HARD_REGISTER_P (dest)
2830 && targetm.small_register_classes_for_mode_p (GET_MODE (dest))))
2831 return FALSE;
2832
2833 if (!CONSTANT_P (src) && !register_operand (src, VOIDmode))
2834 return FALSE;
2835
2836 if (side_effects_p (src) || side_effects_p (dest))
2837 return FALSE;
2838
2839 if (may_trap_p (src) || may_trap_p (dest))
2840 return FALSE;
2841
2842 /* Don't try to handle this if the source register was
2843 modified earlier in the block. */
2844 if ((REG_P (src)
2845 && vals->get (src))
2846 || (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src))
2847 && vals->get (SUBREG_REG (src))))
2848 return FALSE;
2849
2850 /* Don't try to handle this if the destination register was
2851 modified earlier in the block. */
2852 if (vals->get (dest))
2853 return FALSE;
2854
2855 /* Don't try to handle this if the condition uses the
2856 destination register. */
2857 if (reg_overlap_mentioned_p (dest, cond))
2858 return FALSE;
2859
2860 /* Don't try to handle this if the source register is modified
2861 later in the block. */
2862 if (!CONSTANT_P (src)
2863 && modified_between_p (src, insn, NEXT_INSN (BB_END (bb))))
2864 return FALSE;
2865
2866 vals->put (dest, src);
2867
2868 regs->safe_push (dest);
2869 }
2870
2871 return TRUE;
2872 }
2873
2874 /* Given a basic block BB suitable for conditional move conversion,
2875 a condition COND, and pointer maps THEN_VALS and ELSE_VALS containing
2876 the register values depending on COND, emit the insns in the block as
2877 conditional moves. If ELSE_BLOCK_P is true, THEN_BB was already
2878 processed. The caller has started a sequence for the conversion.
2879 Return true if successful, false if something goes wrong. */
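/* Conceptually, each insn "dest = src" in the THEN block becomes
   "dest = cond ? src : else_val", where the ELSE value defaults to DEST
   itself when the ELSE block does not set it.  When processing the ELSE
   block the arms are swapped, and any register already handled in the
   THEN block is skipped.  */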
2880
2881 static bool
2882 cond_move_convert_if_block (struct noce_if_info *if_infop,
2883 basic_block bb, rtx cond,
2884 hash_map<rtx, rtx> *then_vals,
2885 hash_map<rtx, rtx> *else_vals,
2886 bool else_block_p)
2887 {
2888 enum rtx_code code;
2889 rtx_insn *insn;
2890 rtx cond_arg0, cond_arg1;
2891
2892 code = GET_CODE (cond);
2893 cond_arg0 = XEXP (cond, 0);
2894 cond_arg1 = XEXP (cond, 1);
2895
2896 FOR_BB_INSNS (bb, insn)
2897 {
2898 rtx set, target, dest, t, e;
2899
2900 /* ??? Maybe emit conditional debug insn? */
2901 if (!NONDEBUG_INSN_P (insn) || JUMP_P (insn))
2902 continue;
2903 set = single_set (insn);
2904 gcc_assert (set && REG_P (SET_DEST (set)));
2905
2906 dest = SET_DEST (set);
2907
2908 rtx *then_slot = then_vals->get (dest);
2909 rtx *else_slot = else_vals->get (dest);
2910 t = then_slot ? *then_slot : NULL_RTX;
2911 e = else_slot ? *else_slot : NULL_RTX;
2912
2913 if (else_block_p)
2914 {
2915 /* If this register was set in the then block, we already
2916 handled this case there. */
2917 if (t)
2918 continue;
2919 t = dest;
2920 gcc_assert (e);
2921 }
2922 else
2923 {
2924 gcc_assert (t);
2925 if (!e)
2926 e = dest;
2927 }
2928
2929 target = noce_emit_cmove (if_infop, dest, code, cond_arg0, cond_arg1,
2930 t, e);
2931 if (!target)
2932 return false;
2933
2934 if (target != dest)
2935 noce_emit_move_insn (dest, target);
2936 }
2937
2938 return true;
2939 }
2940
2941 /* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
2942 it using only conditional moves. Return TRUE if we were successful at
2943 converting the block. */
2944
2945 static int
2946 cond_move_process_if_block (struct noce_if_info *if_info)
2947 {
2948 basic_block test_bb = if_info->test_bb;
2949 basic_block then_bb = if_info->then_bb;
2950 basic_block else_bb = if_info->else_bb;
2951 basic_block join_bb = if_info->join_bb;
2952 rtx_insn *jump = if_info->jump;
2953 rtx cond = if_info->cond;
2954 rtx_insn *seq, *loc_insn;
2955 rtx reg;
2956 int c;
2957 vec<rtx> then_regs = vNULL;
2958 vec<rtx> else_regs = vNULL;
2959 unsigned int i;
2960 int success_p = FALSE;
2961
2962 /* Build a mapping for each block to the value used for each
2963 register. */
2964 hash_map<rtx, rtx> then_vals;
2965 hash_map<rtx, rtx> else_vals;
2966
2967 /* Make sure the blocks are suitable. */
2968 if (!check_cond_move_block (then_bb, &then_vals, &then_regs, cond)
2969 || (else_bb
2970 && !check_cond_move_block (else_bb, &else_vals, &else_regs, cond)))
2971 goto done;
2972
2973 /* Make sure the blocks can be used together. If the same register
2974 is set in both blocks, and is not set to a constant in both
2975 cases, then both blocks must set it to the same register. We
2976 have already verified that if it is set to a register, that the
2977 source register does not change after the assignment. Also count
2978 the number of registers set in only one of the blocks. */
2979 c = 0;
2980 FOR_EACH_VEC_ELT (then_regs, i, reg)
2981 {
2982 rtx *then_slot = then_vals.get (reg);
2983 rtx *else_slot = else_vals.get (reg);
2984
2985 gcc_checking_assert (then_slot);
2986 if (!else_slot)
2987 ++c;
2988 else
2989 {
2990 rtx then_val = *then_slot;
2991 rtx else_val = *else_slot;
2992 if (!CONSTANT_P (then_val) && !CONSTANT_P (else_val)
2993 && !rtx_equal_p (then_val, else_val))
2994 goto done;
2995 }
2996 }
2997
2998 /* Finish computing C, which is checked against MAX_CONDITIONAL_EXECUTE. */
2999 FOR_EACH_VEC_ELT (else_regs, i, reg)
3000 {
3001 gcc_checking_assert (else_vals.get (reg));
3002 if (!then_vals.get (reg))
3003 ++c;
3004 }
3005
3006 /* Make sure it is reasonable to convert this block. What matters
3007 is the number of assignments currently made in only one of the
3008 branches, since if we convert we are going to always execute
3009 them. */
3010 if (c > MAX_CONDITIONAL_EXECUTE)
3011 goto done;
3012
3013 /* Try to emit the conditional moves. First do the then block,
3014 then do anything left in the else blocks. */
3015 start_sequence ();
3016 if (!cond_move_convert_if_block (if_info, then_bb, cond,
3017 &then_vals, &else_vals, false)
3018 || (else_bb
3019 && !cond_move_convert_if_block (if_info, else_bb, cond,
3020 &then_vals, &else_vals, true)))
3021 {
3022 end_sequence ();
3023 goto done;
3024 }
3025 seq = end_ifcvt_sequence (if_info);
3026 if (!seq)
3027 goto done;
3028
3029 loc_insn = first_active_insn (then_bb);
3030 if (!loc_insn)
3031 {
3032 loc_insn = first_active_insn (else_bb);
3033 gcc_assert (loc_insn);
3034 }
3035 emit_insn_before_setloc (seq, jump, INSN_LOCATION (loc_insn));
3036
3037 if (else_bb)
3038 {
3039 delete_basic_block (else_bb);
3040 num_true_changes++;
3041 }
3042 else
3043 remove_edge (find_edge (test_bb, join_bb));
3044
3045 remove_edge (find_edge (then_bb, join_bb));
3046 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3047 delete_basic_block (then_bb);
3048 num_true_changes++;
3049
3050 if (can_merge_blocks_p (test_bb, join_bb))
3051 {
3052 merge_blocks (test_bb, join_bb);
3053 num_true_changes++;
3054 }
3055
3056 num_updated_if_blocks++;
3057
3058 success_p = TRUE;
3059
3060 done:
3061 then_regs.release ();
3062 else_regs.release ();
3063 return success_p;
3064 }
3065
3066 \f
3067 /* Determine if a given basic block heads a simple IF-THEN-JOIN or an
3068 IF-THEN-ELSE-JOIN block.
3069
3070 If so, we'll try to convert the insns to not require the branch,
3071 using only transformations that do not require conditional execution.
3072
3073 Return TRUE if we were successful at converting the block. */
3074
3075 static int
3076 noce_find_if_block (basic_block test_bb, edge then_edge, edge else_edge,
3077 int pass)
3078 {
3079 basic_block then_bb, else_bb, join_bb;
3080 bool then_else_reversed = false;
3081 rtx_insn *jump;
3082 rtx cond;
3083 rtx_insn *cond_earliest;
3084 struct noce_if_info if_info;
3085
3086 /* We should only ever get here before reload. */
3087 gcc_assert (!reload_completed);
3088
3089 /* Recognize an IF-THEN-ELSE-JOIN block. */
3090 if (single_pred_p (then_edge->dest)
3091 && single_succ_p (then_edge->dest)
3092 && single_pred_p (else_edge->dest)
3093 && single_succ_p (else_edge->dest)
3094 && single_succ (then_edge->dest) == single_succ (else_edge->dest))
3095 {
3096 then_bb = then_edge->dest;
3097 else_bb = else_edge->dest;
3098 join_bb = single_succ (then_bb);
3099 }
3100 /* Recognize an IF-THEN-JOIN block. */
3101 else if (single_pred_p (then_edge->dest)
3102 && single_succ_p (then_edge->dest)
3103 && single_succ (then_edge->dest) == else_edge->dest)
3104 {
3105 then_bb = then_edge->dest;
3106 else_bb = NULL_BLOCK;
3107 join_bb = else_edge->dest;
3108 }
3109 /* Recognize an IF-ELSE-JOIN block. We can have those because the order
3110 of basic blocks in cfglayout mode does not matter, so the fallthrough
3111 edge can go to any basic block (and not just to bb->next_bb, like in
3112 cfgrtl mode). */
3113 else if (single_pred_p (else_edge->dest)
3114 && single_succ_p (else_edge->dest)
3115 && single_succ (else_edge->dest) == then_edge->dest)
3116 {
3117 /* The noce transformations do not apply to IF-ELSE-JOIN blocks.
3118 To make this work, we have to invert the THEN and ELSE blocks
3119 and reverse the jump condition. */
3120 then_bb = else_edge->dest;
3121 else_bb = NULL_BLOCK;
3122 join_bb = single_succ (then_bb);
3123 then_else_reversed = true;
3124 }
3125 else
3126 /* Not a form we can handle. */
3127 return FALSE;
3128
3129 /* The edges of the THEN and ELSE blocks cannot have complex edges. */
3130 if (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
3131 return FALSE;
3132 if (else_bb
3133 && single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
3134 return FALSE;
3135
3136 num_possible_if_blocks++;
3137
3138 if (dump_file)
3139 {
3140 fprintf (dump_file,
3141 "\nIF-THEN%s-JOIN block found, pass %d, test %d, then %d",
3142 (else_bb) ? "-ELSE" : "",
3143 pass, test_bb->index, then_bb->index);
3144
3145 if (else_bb)
3146 fprintf (dump_file, ", else %d", else_bb->index);
3147
3148 fprintf (dump_file, ", join %d\n", join_bb->index);
3149 }
3150
3151 /* If the conditional jump is more than just a conditional
3152 jump, then we cannot do if-conversion on this block. */
3153 jump = BB_END (test_bb);
3154 if (! onlyjump_p (jump))
3155 return FALSE;
3156
3157 /* If this is not a standard conditional jump, we can't parse it. */
3158 cond = noce_get_condition (jump, &cond_earliest, then_else_reversed);
3159 if (!cond)
3160 return FALSE;
3161
3162 /* We must be comparing objects whose modes imply the size. */
3163 if (GET_MODE (XEXP (cond, 0)) == BLKmode)
3164 return FALSE;
3165
3166 /* Initialize an IF_INFO struct to pass around. */
3167 memset (&if_info, 0, sizeof if_info);
3168 if_info.test_bb = test_bb;
3169 if_info.then_bb = then_bb;
3170 if_info.else_bb = else_bb;
3171 if_info.join_bb = join_bb;
3172 if_info.cond = cond;
3173 if_info.cond_earliest = cond_earliest;
3174 if_info.jump = jump;
3175 if_info.then_else_reversed = then_else_reversed;
3176 if_info.branch_cost = BRANCH_COST (optimize_bb_for_speed_p (test_bb),
3177 predictable_edge_p (then_edge));
3178
3179 /* Do the real work. */
3180
3181 if (noce_process_if_block (&if_info))
3182 return TRUE;
3183
3184 if (HAVE_conditional_move
3185 && cond_move_process_if_block (&if_info))
3186 return TRUE;
3187
3188 return FALSE;
3189 }
3190 \f
3191
3192 /* Merge the blocks and mark for local life update. */
3193
3194 static void
3195 merge_if_block (struct ce_if_block * ce_info)
3196 {
3197 basic_block test_bb = ce_info->test_bb; /* last test block */
3198 basic_block then_bb = ce_info->then_bb; /* THEN */
3199 basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */
3200 basic_block join_bb = ce_info->join_bb; /* join block */
3201 basic_block combo_bb;
3202
3203 /* All block merging is done into the lower block numbers. */
3204
3205 combo_bb = test_bb;
3206 df_set_bb_dirty (test_bb);
3207
3208 /* Merge any basic blocks to handle && and || subtests. Each of
3209 the blocks are on the fallthru path from the predecessor block. */
3210 if (ce_info->num_multiple_test_blocks > 0)
3211 {
3212 basic_block bb = test_bb;
3213 basic_block last_test_bb = ce_info->last_test_bb;
3214 basic_block fallthru = block_fallthru (bb);
3215
3216 do
3217 {
3218 bb = fallthru;
3219 fallthru = block_fallthru (bb);
3220 merge_blocks (combo_bb, bb);
3221 num_true_changes++;
3222 }
3223 while (bb != last_test_bb);
3224 }
3225
3226 /* Merge TEST block into THEN block. Normally the THEN block won't have a
3227 label, but it might if there were || tests. That label's count should be
3228 zero, and it normally should be removed. */
3229
3230 if (then_bb)
3231 {
3232 /* If THEN_BB has no successors, then there's a BARRIER after it.
3233 If COMBO_BB has more than one successor (THEN_BB), then that BARRIER
3234 is no longer needed, and in fact it is incorrect to leave it in
3235 the insn stream. */
3236 if (EDGE_COUNT (then_bb->succs) == 0
3237 && EDGE_COUNT (combo_bb->succs) > 1)
3238 {
3239 rtx_insn *end = NEXT_INSN (BB_END (then_bb));
3240 while (end && NOTE_P (end) && !NOTE_INSN_BASIC_BLOCK_P (end))
3241 end = NEXT_INSN (end);
3242
3243 if (end && BARRIER_P (end))
3244 delete_insn (end);
3245 }
3246 merge_blocks (combo_bb, then_bb);
3247 num_true_changes++;
3248 }
3249
3250 /* The ELSE block, if it existed, had a label. That label count
3251 will almost always be zero, but odd things can happen when labels
3252 get their addresses taken. */
3253 if (else_bb)
3254 {
3255 /* If ELSE_BB has no successors, then there's a BARRIER after it.
3256 If COMBO_BB has more than one successor (ELSE_BB), then that BARRIER
3257 is no longer needed, and in fact it is incorrect to leave it in
3258 the insn stream. */
3259 if (EDGE_COUNT (else_bb->succs) == 0
3260 && EDGE_COUNT (combo_bb->succs) > 1)
3261 {
3262 rtx_insn *end = NEXT_INSN (BB_END (else_bb));
3263 while (end && NOTE_P (end) && !NOTE_INSN_BASIC_BLOCK_P (end))
3264 end = NEXT_INSN (end);
3265
3266 if (end && BARRIER_P (end))
3267 delete_insn (end);
3268 }
3269 merge_blocks (combo_bb, else_bb);
3270 num_true_changes++;
3271 }
3272
3273 /* If there was no join block reported, that means it was not adjacent
3274 to the others, and so we cannot merge them. */
3275
3276 if (! join_bb)
3277 {
3278 rtx_insn *last = BB_END (combo_bb);
3279
3280 /* The outgoing edge for the current COMBO block should already
3281 be correct. Verify this. */
3282 if (EDGE_COUNT (combo_bb->succs) == 0)
3283 gcc_assert (find_reg_note (last, REG_NORETURN, NULL)
3284 || (NONJUMP_INSN_P (last)
3285 && GET_CODE (PATTERN (last)) == TRAP_IF
3286 && (TRAP_CONDITION (PATTERN (last))
3287 == const_true_rtx)));
3288
3289 else
3290 /* There should still be something at the end of the THEN or ELSE
3291 blocks taking us to our final destination. */
3292 gcc_assert (JUMP_P (last)
3293 || (EDGE_SUCC (combo_bb, 0)->dest
3294 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3295 && CALL_P (last)
3296 && SIBLING_CALL_P (last))
3297 || ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH)
3298 && can_throw_internal (last)));
3299 }
3300
3301 /* The JOIN block may have had quite a number of other predecessors too.
3302 Since we've already merged the TEST, THEN and ELSE blocks, we should
3303 have only one remaining edge from our if-then-else diamond. If there
3304 is more than one remaining edge, it must come from elsewhere. There
3305 may be zero incoming edges if the THEN block didn't actually join
3306 back up (as with a call to a non-return function). */
3307 else if (EDGE_COUNT (join_bb->preds) < 2
3308 && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3309 {
3310 /* We can merge the JOIN cleanly and update the dataflow, then
3311 try again on this pass. */
3312 merge_blocks (combo_bb, join_bb);
3313 num_true_changes++;
3314 }
3315 else
3316 {
3317 /* We cannot merge the JOIN. */
3318
3319 /* The outgoing edge for the current COMBO block should already
3320 be correct. Verify this. */
3321 gcc_assert (single_succ_p (combo_bb)
3322 && single_succ (combo_bb) == join_bb);
3323
3324 /* Remove the jump and cruft from the end of the COMBO block. */
3325 if (join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3326 tidy_fallthru_edge (single_succ_edge (combo_bb));
3327 }
3328
3329 num_updated_if_blocks++;
3330 }
3331 \f
3332 /* Find a block ending in a simple IF condition and try to transform it
3333 in some way. When converting a multi-block condition, put the new code
3334 in the first such block and delete the rest. Return a pointer to this
3335 first block if some transformation was done. Return NULL otherwise. */
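/* The strategies below are tried in order: the noce transformations
   (before reload only), conditional execution (after reload, on targets
   that have it), conversion to a conditional trap, and finally the
   block-stealing cases 1 and 2 when post-dominator information is
   available.  */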
3336
3337 static basic_block
3338 find_if_header (basic_block test_bb, int pass)
3339 {
3340 ce_if_block ce_info;
3341 edge then_edge;
3342 edge else_edge;
3343
3344 /* The kind of block we're looking for has exactly two successors. */
3345 if (EDGE_COUNT (test_bb->succs) != 2)
3346 return NULL;
3347
3348 then_edge = EDGE_SUCC (test_bb, 0);
3349 else_edge = EDGE_SUCC (test_bb, 1);
3350
3351 if (df_get_bb_dirty (then_edge->dest))
3352 return NULL;
3353 if (df_get_bb_dirty (else_edge->dest))
3354 return NULL;
3355
3356 /* Neither edge should be abnormal. */
3357 if ((then_edge->flags & EDGE_COMPLEX)
3358 || (else_edge->flags & EDGE_COMPLEX))
3359 return NULL;
3360
3361 /* Nor exit the loop. */
3362 if ((then_edge->flags & EDGE_LOOP_EXIT)
3363 || (else_edge->flags & EDGE_LOOP_EXIT))
3364 return NULL;
3365
3366 /* The THEN edge is canonically the one that falls through. */
3367 if (then_edge->flags & EDGE_FALLTHRU)
3368 ;
3369 else if (else_edge->flags & EDGE_FALLTHRU)
3370 {
3371 edge e = else_edge;
3372 else_edge = then_edge;
3373 then_edge = e;
3374 }
3375 else
3376 /* Otherwise this must be a multiway branch of some sort. */
3377 return NULL;
3378
3379 memset (&ce_info, 0, sizeof (ce_info));
3380 ce_info.test_bb = test_bb;
3381 ce_info.then_bb = then_edge->dest;
3382 ce_info.else_bb = else_edge->dest;
3383 ce_info.pass = pass;
3384
3385 #ifdef IFCVT_MACHDEP_INIT
3386 IFCVT_MACHDEP_INIT (&ce_info);
3387 #endif
3388
3389 if (!reload_completed
3390 && noce_find_if_block (test_bb, then_edge, else_edge, pass))
3391 goto success;
3392
3393 if (reload_completed
3394 && targetm.have_conditional_execution ()
3395 && cond_exec_find_if_block (&ce_info))
3396 goto success;
3397
3398 if (HAVE_trap
3399 && optab_handler (ctrap_optab, word_mode) != CODE_FOR_nothing
3400 && find_cond_trap (test_bb, then_edge, else_edge))
3401 goto success;
3402
3403 if (dom_info_state (CDI_POST_DOMINATORS) >= DOM_NO_FAST_QUERY
3404 && (reload_completed || !targetm.have_conditional_execution ()))
3405 {
3406 if (find_if_case_1 (test_bb, then_edge, else_edge))
3407 goto success;
3408 if (find_if_case_2 (test_bb, then_edge, else_edge))
3409 goto success;
3410 }
3411
3412 return NULL;
3413
3414 success:
3415 if (dump_file)
3416 fprintf (dump_file, "Conversion succeeded on pass %d.\n", pass);
3417 /* Set this so we continue looking. */
3418 cond_exec_changed_p = TRUE;
3419 return ce_info.test_bb;
3420 }
3421
3422 /* Check whether CUR_BB has two edges, one falling through to the next
3423 block and the other jumping to TARGET_BB, so that we can tell if the
3424 block is part of an && test or an || test. Return the number of
3425 non-note, non-jump, non-USE/CLOBBER insns in the block if so, -1 otherwise. */
3426
3427 static int
3428 block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb)
3429 {
3430 edge cur_edge;
3431 int fallthru_p = FALSE;
3432 int jump_p = FALSE;
3433 rtx_insn *insn;
3434 rtx_insn *end;
3435 int n_insns = 0;
3436 edge_iterator ei;
3437
3438 if (!cur_bb || !target_bb)
3439 return -1;
3440
3441 /* If no edges, obviously it doesn't jump or fallthru. */
3442 if (EDGE_COUNT (cur_bb->succs) == 0)
3443 return FALSE;
3444
3445 FOR_EACH_EDGE (cur_edge, ei, cur_bb->succs)
3446 {
3447 if (cur_edge->flags & EDGE_COMPLEX)
3448 /* Anything complex isn't what we want. */
3449 return -1;
3450
3451 else if (cur_edge->flags & EDGE_FALLTHRU)
3452 fallthru_p = TRUE;
3453
3454 else if (cur_edge->dest == target_bb)
3455 jump_p = TRUE;
3456
3457 else
3458 return -1;
3459 }
3460
3461 if ((jump_p & fallthru_p) == 0)
3462 return -1;
3463
3464 /* Don't allow calls in the block, since this is used to group && and ||
3465 together for conditional execution support. ??? We should support
3466 conditional execution across calls for IA-64 some day, but
3467 for now it makes the code simpler. */
3468 end = BB_END (cur_bb);
3469 insn = BB_HEAD (cur_bb);
3470
3471 while (insn != NULL_RTX)
3472 {
3473 if (CALL_P (insn))
3474 return -1;
3475
3476 if (INSN_P (insn)
3477 && !JUMP_P (insn)
3478 && !DEBUG_INSN_P (insn)
3479 && GET_CODE (PATTERN (insn)) != USE
3480 && GET_CODE (PATTERN (insn)) != CLOBBER)
3481 n_insns++;
3482
3483 if (insn == end)
3484 break;
3485
3486 insn = NEXT_INSN (insn);
3487 }
3488
3489 return n_insns;
3490 }
3491
3492 /* Determine if a given basic block heads a simple IF-THEN or IF-THEN-ELSE
3493 block. If so, we'll try to convert the insns to not require the branch.
3494 Return TRUE if we were successful at converting the block. */
3495
3496 static int
3497 cond_exec_find_if_block (struct ce_if_block * ce_info)
3498 {
3499 basic_block test_bb = ce_info->test_bb;
3500 basic_block then_bb = ce_info->then_bb;
3501 basic_block else_bb = ce_info->else_bb;
3502 basic_block join_bb = NULL_BLOCK;
3503 edge cur_edge;
3504 basic_block next;
3505 edge_iterator ei;
3506
3507 ce_info->last_test_bb = test_bb;
3508
3509 /* We should only ever get here after reload,
3510 and if we have conditional execution. */
3511 gcc_assert (reload_completed && targetm.have_conditional_execution ());
3512
3513 /* Discover if any fall through predecessors of the current test basic block
3514 were && tests (which jump to the else block) or || tests (which jump to
3515 the then block). */
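  /* For "if (a && b)" the test of A branches to the ELSE block when A
     is false and falls through to the test of B; for "if (a || b)" the
     test of A branches to the THEN block when A is true.  Which block a
     fallthru predecessor jumps to therefore identifies the kind of
     chain.  */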
3516 if (single_pred_p (test_bb)
3517 && single_pred_edge (test_bb)->flags == EDGE_FALLTHRU)
3518 {
3519 basic_block bb = single_pred (test_bb);
3520 basic_block target_bb;
3521 int max_insns = MAX_CONDITIONAL_EXECUTE;
3522 int n_insns;
3523
3524 /* Determine if the preceding block is an && or || block. */
3525 if ((n_insns = block_jumps_and_fallthru_p (bb, else_bb)) >= 0)
3526 {
3527 ce_info->and_and_p = TRUE;
3528 target_bb = else_bb;
3529 }
3530 else if ((n_insns = block_jumps_and_fallthru_p (bb, then_bb)) >= 0)
3531 {
3532 ce_info->and_and_p = FALSE;
3533 target_bb = then_bb;
3534 }
3535 else
3536 target_bb = NULL_BLOCK;
3537
3538 if (target_bb && n_insns <= max_insns)
3539 {
3540 int total_insns = 0;
3541 int blocks = 0;
3542
3543 ce_info->last_test_bb = test_bb;
3544
3545 /* Found at least one && or || block, look for more. */
3546 do
3547 {
3548 ce_info->test_bb = test_bb = bb;
3549 total_insns += n_insns;
3550 blocks++;
3551
3552 if (!single_pred_p (bb))
3553 break;
3554
3555 bb = single_pred (bb);
3556 n_insns = block_jumps_and_fallthru_p (bb, target_bb);
3557 }
3558 while (n_insns >= 0 && (total_insns + n_insns) <= max_insns);
3559
3560 ce_info->num_multiple_test_blocks = blocks;
3561 ce_info->num_multiple_test_insns = total_insns;
3562
3563 if (ce_info->and_and_p)
3564 ce_info->num_and_and_blocks = blocks;
3565 else
3566 ce_info->num_or_or_blocks = blocks;
3567 }
3568 }
3569
3570 /* The THEN block of an IF-THEN combo must have exactly one predecessor,
3571 other than any || blocks which jump to the THEN block. */
3572 if ((EDGE_COUNT (then_bb->preds) - ce_info->num_or_or_blocks) != 1)
3573 return FALSE;
3574
3575 /* The edges of the THEN and ELSE blocks cannot have complex edges. */
3576 FOR_EACH_EDGE (cur_edge, ei, then_bb->preds)
3577 {
3578 if (cur_edge->flags & EDGE_COMPLEX)
3579 return FALSE;
3580 }
3581
3582 FOR_EACH_EDGE (cur_edge, ei, else_bb->preds)
3583 {
3584 if (cur_edge->flags & EDGE_COMPLEX)
3585 return FALSE;
3586 }
3587
3588 /* The THEN block of an IF-THEN combo must have zero or one successors. */
3589 if (EDGE_COUNT (then_bb->succs) > 0
3590 && (!single_succ_p (then_bb)
3591 || (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
3592 || (epilogue_completed
3593 && tablejump_p (BB_END (then_bb), NULL, NULL))))
3594 return FALSE;
3595
3596 /* If the THEN block has no successors, conditional execution can still
3597 make a conditional call. Don't do this unless the ELSE block has
3598 only one incoming edge -- the CFG manipulation is too ugly otherwise.
3599 Check for the last insn of the THEN block being an indirect jump, which
3600 is listed as not having any successors, but confuses the rest of the CE
3601 code processing. ??? We should fix this in the future. */
3602 if (EDGE_COUNT (then_bb->succs) == 0)
3603 {
3604 if (single_pred_p (else_bb) && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3605 {
3606 rtx_insn *last_insn = BB_END (then_bb);
3607
3608 while (last_insn
3609 && NOTE_P (last_insn)
3610 && last_insn != BB_HEAD (then_bb))
3611 last_insn = PREV_INSN (last_insn);
3612
3613 if (last_insn
3614 && JUMP_P (last_insn)
3615 && ! simplejump_p (last_insn))
3616 return FALSE;
3617
3618 join_bb = else_bb;
3619 else_bb = NULL_BLOCK;
3620 }
3621 else
3622 return FALSE;
3623 }
3624
3625 /* If the THEN block's successor is the other edge out of the TEST block,
3626 then we have an IF-THEN combo without an ELSE. */
3627 else if (single_succ (then_bb) == else_bb)
3628 {
3629 join_bb = else_bb;
3630 else_bb = NULL_BLOCK;
3631 }
3632
3633 /* If the THEN and ELSE block meet in a subsequent block, and the ELSE
3634 has exactly one predecessor and one successor, and the outgoing edge
3635 is not complex, then we have an IF-THEN-ELSE combo. */
3636 else if (single_succ_p (else_bb)
3637 && single_succ (then_bb) == single_succ (else_bb)
3638 && single_pred_p (else_bb)
3639 && !(single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
3640 && !(epilogue_completed
3641 && tablejump_p (BB_END (else_bb), NULL, NULL)))
3642 join_bb = single_succ (else_bb);
3643
3644 /* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */
3645 else
3646 return FALSE;
3647
3648 num_possible_if_blocks++;
3649
3650 if (dump_file)
3651 {
3652 fprintf (dump_file,
3653 "\nIF-THEN%s block found, pass %d, start block %d "
3654 "[insn %d], then %d [%d]",
3655 (else_bb) ? "-ELSE" : "",
3656 ce_info->pass,
3657 test_bb->index,
3658 BB_HEAD (test_bb) ? (int)INSN_UID (BB_HEAD (test_bb)) : -1,
3659 then_bb->index,
3660 BB_HEAD (then_bb) ? (int)INSN_UID (BB_HEAD (then_bb)) : -1);
3661
3662 if (else_bb)
3663 fprintf (dump_file, ", else %d [%d]",
3664 else_bb->index,
3665 BB_HEAD (else_bb) ? (int)INSN_UID (BB_HEAD (else_bb)) : -1);
3666
3667 fprintf (dump_file, ", join %d [%d]",
3668 join_bb->index,
3669 BB_HEAD (join_bb) ? (int)INSN_UID (BB_HEAD (join_bb)) : -1);
3670
3671 if (ce_info->num_multiple_test_blocks > 0)
3672 fprintf (dump_file, ", %d %s block%s last test %d [%d]",
3673 ce_info->num_multiple_test_blocks,
3674 (ce_info->and_and_p) ? "&&" : "||",
3675 (ce_info->num_multiple_test_blocks == 1) ? "" : "s",
3676 ce_info->last_test_bb->index,
3677 ((BB_HEAD (ce_info->last_test_bb))
3678 ? (int)INSN_UID (BB_HEAD (ce_info->last_test_bb))
3679 : -1));
3680
3681 fputc ('\n', dump_file);
3682 }
3683
3684 /* Make sure IF, THEN, and ELSE, blocks are adjacent. Actually, we get the
3685 first condition for free, since we've already asserted that there's a
3686 fallthru edge from IF to THEN. Likewise for the && and || blocks, since
3687 we checked the FALLTHRU flag, those are already adjacent to the last IF
3688 block. */
3689 /* ??? As an enhancement, move the ELSE block. Have to deal with
3690 BLOCK notes, if by no other means than backing out the merge if they
3691 exist. Sticky enough I don't want to think about it now. */
3692 next = then_bb;
3693 if (else_bb && (next = next->next_bb) != else_bb)
3694 return FALSE;
3695 if ((next = next->next_bb) != join_bb
3696 && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3697 {
3698 if (else_bb)
3699 join_bb = NULL;
3700 else
3701 return FALSE;
3702 }
3703
3704 /* Do the real work. */
3705
3706 ce_info->else_bb = else_bb;
3707 ce_info->join_bb = join_bb;
3708
3709 /* If we have && and || tests, first try to combine them into the
3710 conditional code; if that fails, go back and handle the block without
3711 the && and ||, which at present handles the && case if there was
3712 no ELSE block. */
3713 if (cond_exec_process_if_block (ce_info, TRUE))
3714 return TRUE;
3715
3716 if (ce_info->num_multiple_test_blocks)
3717 {
3718 cancel_changes (0);
3719
3720 if (cond_exec_process_if_block (ce_info, FALSE))
3721 return TRUE;
3722 }
3723
3724 return FALSE;
3725 }
3726
3727 /* Convert a branch over a trap, or a branch
3728 to a trap, into a conditional trap. */
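/* Illustrative example: "if (x == 0) goto over; trap; over: ..." (a
   branch over the trap) becomes a single conditional trap that fires
   when x != 0.  The recovered comparison is reversed below when the
   trap sits on the THEN arm, so the new TRAP_IF fires exactly when
   control would have reached the trap block.  */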
3729
3730 static int
3731 find_cond_trap (basic_block test_bb, edge then_edge, edge else_edge)
3732 {
3733 basic_block then_bb = then_edge->dest;
3734 basic_block else_bb = else_edge->dest;
3735 basic_block other_bb, trap_bb;
3736 rtx_insn *trap, *jump;
3737 rtx cond, seq;
3738 rtx_insn *cond_earliest;
3739 enum rtx_code code;
3740
3741 /* Locate the block with the trap instruction. */
3742 /* ??? While we look for no successors, we really ought to allow
3743 EH successors. Need to fix merge_if_block for that to work. */
3744 if ((trap = block_has_only_trap (then_bb)) != NULL)
3745 trap_bb = then_bb, other_bb = else_bb;
3746 else if ((trap = block_has_only_trap (else_bb)) != NULL)
3747 trap_bb = else_bb, other_bb = then_bb;
3748 else
3749 return FALSE;
3750
3751 if (dump_file)
3752 {
3753 fprintf (dump_file, "\nTRAP-IF block found, start %d, trap %d\n",
3754 test_bb->index, trap_bb->index);
3755 }
3756
3757 /* If this is not a standard conditional jump, we can't parse it. */
3758 jump = BB_END (test_bb);
3759 cond = noce_get_condition (jump, &cond_earliest, false);
3760 if (! cond)
3761 return FALSE;
3762
3763 /* If the conditional jump is more than just a conditional jump, then
3764 we cannot do if-conversion on this block. */
3765 if (! onlyjump_p (jump))
3766 return FALSE;
3767
3768 /* We must be comparing objects whose modes imply the size. */
3769 if (GET_MODE (XEXP (cond, 0)) == BLKmode)
3770 return FALSE;
3771
3772 /* Reverse the comparison code, if necessary. */
3773 code = GET_CODE (cond);
3774 if (then_bb == trap_bb)
3775 {
3776 code = reversed_comparison_code (cond, jump);
3777 if (code == UNKNOWN)
3778 return FALSE;
3779 }
3780
3781 /* Attempt to generate the conditional trap. */
3782 seq = gen_cond_trap (code, copy_rtx (XEXP (cond, 0)),
3783 copy_rtx (XEXP (cond, 1)),
3784 TRAP_CODE (PATTERN (trap)));
3785 if (seq == NULL)
3786 return FALSE;
3787
3788 /* Emit the new insns before cond_earliest. */
3789 emit_insn_before_setloc (seq, cond_earliest, INSN_LOCATION (trap));
3790
3791 /* Delete the trap block if possible. */
3792 remove_edge (trap_bb == then_bb ? then_edge : else_edge);
3793 df_set_bb_dirty (test_bb);
3794 df_set_bb_dirty (then_bb);
3795 df_set_bb_dirty (else_bb);
3796
3797 if (EDGE_COUNT (trap_bb->preds) == 0)
3798 {
3799 delete_basic_block (trap_bb);
3800 num_true_changes++;
3801 }
3802
3803 /* Wire together the blocks again. */
3804 if (current_ir_type () == IR_RTL_CFGLAYOUT)
3805 single_succ_edge (test_bb)->flags |= EDGE_FALLTHRU;
3806 else if (trap_bb == then_bb)
3807 {
3808 rtx lab;
3809 rtx_insn *newjump;
3810
3811 lab = JUMP_LABEL (jump);
3812 newjump = emit_jump_insn_after (gen_jump (lab), jump);
3813 LABEL_NUSES (lab) += 1;
3814 JUMP_LABEL (newjump) = lab;
3815 emit_barrier_after (newjump);
3816 }
3817 delete_insn (jump);
3818
3819 if (can_merge_blocks_p (test_bb, other_bb))
3820 {
3821 merge_blocks (test_bb, other_bb);
3822 num_true_changes++;
3823 }
3824
3825 num_updated_if_blocks++;
3826 return TRUE;
3827 }
3828
3829 /* Subroutine of find_cond_trap: if BB contains only a trap insn,
3830 return it. */
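
/* (Such a block is typically what __builtin_trap () expands to: a
   lone (trap_if (const_int 1) ...) insn with no successor edges.) */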
3831
3832 static rtx_insn *
3833 block_has_only_trap (basic_block bb)
3834 {
3835 rtx_insn *trap;
3836
3837 /* We're not the exit block. */
3838 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
3839 return NULL;
3840
3841 /* The block must have no successors. */
3842 if (EDGE_COUNT (bb->succs) > 0)
3843 return NULL;
3844
3845 /* The only active instruction in the block must be the trap. */
3846 trap = first_active_insn (bb);
3847 if (! (trap == BB_END (bb)
3848 && GET_CODE (PATTERN (trap)) == TRAP_IF
3849 && TRAP_CONDITION (PATTERN (trap)) == const_true_rtx))
3850 return NULL;
3851
3852 return trap;
3853 }
3854
3855 /* Look for IF-THEN-ELSE cases in which one of THEN or ELSE is
3856 transformable, but not necessarily the other. There need be no
3857 JOIN block.
3858
3859 Return TRUE if we were successful at converting the block.
3860
3861 Cases we'd like to look at:
3862
3863 (1)
3864 if (test) goto over; // x not live
3865 x = a;
3866 goto label;
3867 over:
3868
3869 becomes
3870
3871 x = a;
3872 if (! test) goto label;
3873
3874 (2)
3875 if (test) goto E; // x not live
3876 x = big();
3877 goto L;
3878 E:
3879 x = b;
3880 goto M;
3881
3882 becomes
3883
3884 x = b;
3885 if (test) goto M;
3886 x = big();
3887 goto L;
3888
3889 (3) // This one's really only interesting for targets that can do
3890 // multiway branching, e.g. IA-64 BBB bundles. For other targets
3891 // it results in multiple branches on a cache line, which often
3892 // does not sit well with predictors.
3893
3894 if (test1) goto E; // predicted not taken
3895 x = a;
3896 if (test2) goto F;
3897 ...
3898 E:
3899 x = b;
3900 J:
3901
3902 becomes
3903
3904 x = a;
3905 if (test1) goto E;
3906 if (test2) goto F;
3907
3908 Notes:
3909
3910 (A) Don't do (2) if the branch is predicted against the block we're
3911 eliminating. Do it anyway if we can eliminate a branch; this requires
3912 that the sole successor of the eliminated block postdominate the other
3913 side of the if.
3914
3915 (B) With CE, on (3) we can steal from both sides of the if, creating
3916
3917 if (test1) x = a;
3918 if (!test1) x = b;
3919 if (test1) goto J;
3920 if (test2) goto F;
3921 ...
3922 J:
3923
3924 Again, this is most useful if J postdominates.
3925
3926 (C) Conditional execution makes up for the lack of helpful life information.
3927
3928 (D) These heuristics need a lot of work. */
3929
3930 /* Tests for case 1 above. */
3931
3932 static int
3933 find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
3934 {
3935 basic_block then_bb = then_edge->dest;
3936 basic_block else_bb = else_edge->dest;
3937 basic_block new_bb;
3938 int then_bb_index, then_prob;
3939 rtx else_target = NULL_RTX;
3940
3941 /* If we are partitioning hot/cold basic blocks, we don't want to
3942 mess up unconditional or indirect jumps that cross between hot
3943 and cold sections.
3944
3945 Basic block partitioning may result in some jumps that appear to
3946 be optimizable (or blocks that appear to be mergeable), but which really
3947 must be left untouched (they are required to make it safely across
3948 partition boundaries). See the comments at the top of
3949 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
3950
3951 if ((BB_END (then_bb)
3952 && JUMP_P (BB_END (then_bb))
3953 && CROSSING_JUMP_P (BB_END (then_bb)))
3954 || (BB_END (test_bb)
3955 && JUMP_P (BB_END (test_bb))
3956 && CROSSING_JUMP_P (BB_END (test_bb)))
3957 || (BB_END (else_bb)
3958 && JUMP_P (BB_END (else_bb))
3959 && CROSSING_JUMP_P (BB_END (else_bb))))
3960 return FALSE;
3961
3962 /* THEN has one successor. */
3963 if (!single_succ_p (then_bb))
3964 return FALSE;
3965
3966 /* THEN does not fall through, but is not strange either. */
3967 if (single_succ_edge (then_bb)->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))
3968 return FALSE;
3969
3970 /* THEN has one predecessor. */
3971 if (!single_pred_p (then_bb))
3972 return FALSE;
3973
3974 /* THEN must do something. */
3975 if (forwarder_block_p (then_bb))
3976 return FALSE;
3977
3978 num_possible_if_blocks++;
3979 if (dump_file)
3980 fprintf (dump_file,
3981 "\nIF-CASE-1 found, start %d, then %d\n",
3982 test_bb->index, then_bb->index);
3983
3984 if (then_edge->probability)
3985 then_prob = REG_BR_PROB_BASE - then_edge->probability;
3986 else
3987 then_prob = REG_BR_PROB_BASE / 2;
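
/* (then_prob is thus REG_BR_PROB_BASE, i.e. 10000, minus the THEN
   edge's probability: the likelihood that the branch to THEN is not
   taken.) */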
3988
3989 /* We're speculating from the THEN path, so we want to make sure the
3990 cost of speculation is within reason. */
3991 if (! cheap_bb_rtx_cost_p (then_bb, then_prob,
3992 COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (then_edge->src),
3993 predictable_edge_p (then_edge)))))
3994 return FALSE;
3995
3996 if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
3997 {
3998 rtx_insn *jump = BB_END (else_edge->src);
3999 gcc_assert (JUMP_P (jump));
4000 else_target = JUMP_LABEL (jump);
4001 }
4002
4003 /* Registers set are dead, or are predicable. */
4004 if (! dead_or_predicable (test_bb, then_bb, else_bb,
4005 single_succ_edge (then_bb), 1))
4006 return FALSE;
4007
4008 /* Conversion went ok, including moving the insns and fixing up the
4009 jump. Adjust the CFG to match. */
4010
4011 /* We can avoid creating a new basic block if then_bb is immediately
4012 followed by else_bb, i.e. deleting then_bb allows test_bb to fall
4013 through to else_bb. */
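
/* (Sketch of the layout this relies on:

       test_bb: if (test) ...
       then_bb: x = a; <- body already hoisted, block about to go
       else_bb: ...

   With then_bb gone, test_bb falls straight through to else_bb, so
   only the existing fallthru edge needs redirecting.) */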
4014
4015 if (then_bb->next_bb == else_bb
4016 && then_bb->prev_bb == test_bb
4017 && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4018 {
4019 redirect_edge_succ (FALLTHRU_EDGE (test_bb), else_bb);
4020 new_bb = 0;
4021 }
4022 else if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4023 new_bb = force_nonfallthru_and_redirect (FALLTHRU_EDGE (test_bb),
4024 else_bb, else_target);
4025 else
4026 new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb),
4027 else_bb);
4028
4029 df_set_bb_dirty (test_bb);
4030 df_set_bb_dirty (else_bb);
4031
4032 then_bb_index = then_bb->index;
4033 delete_basic_block (then_bb);
4034
4035 /* Make the rest of the code believe that the newly created block is the
4036 THEN_BB block we removed. */
4037 if (new_bb)
4038 {
4039 df_bb_replace (then_bb_index, new_bb);
4040 /* This should have been done above via force_nonfallthru_and_redirect
4041 (possibly called from redirect_edge_and_branch_force). */
4042 gcc_checking_assert (BB_PARTITION (new_bb) == BB_PARTITION (test_bb));
4043 }
4044
4045 num_true_changes++;
4046 num_updated_if_blocks++;
4047
4048 return TRUE;
4049 }
4050
4051 /* Test for case 2 above. */
4052
4053 static int
4054 find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge)
4055 {
4056 basic_block then_bb = then_edge->dest;
4057 basic_block else_bb = else_edge->dest;
4058 edge else_succ;
4059 int then_prob, else_prob;
4060
4061 /* We do not want to speculate (empty) loop latches. */
4062 if (current_loops
4063 && else_bb->loop_father->latch == else_bb)
4064 return FALSE;
4065
4066 /* If we are partitioning hot/cold basic blocks, we don't want to
4067 mess up unconditional or indirect jumps that cross between hot
4068 and cold sections.
4069
4070 Basic block partitioning may result in some jumps that appear to
4071 be optimizable (or blocks that appear to be mergeable), but which really
4072 must be left untouched (they are required to make it safely across
4073 partition boundaries). See the comments at the top of
4074 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4075
4076 if ((BB_END (then_bb)
4077 && JUMP_P (BB_END (then_bb))
4078 && CROSSING_JUMP_P (BB_END (then_bb)))
4079 || (BB_END (test_bb)
4080 && JUMP_P (BB_END (test_bb))
4081 && CROSSING_JUMP_P (BB_END (test_bb)))
4082 || (BB_END (else_bb)
4083 && JUMP_P (BB_END (else_bb))
4084 && CROSSING_JUMP_P (BB_END (else_bb))))
4085 return FALSE;
4086
4087 /* ELSE has one successor. */
4088 if (!single_succ_p (else_bb))
4089 return FALSE;
4090 else
4091 else_succ = single_succ_edge (else_bb);
4092
4093 /* ELSE outgoing edge is not complex. */
4094 if (else_succ->flags & EDGE_COMPLEX)
4095 return FALSE;
4096
4097 /* ELSE has one predecessor. */
4098 if (!single_pred_p (else_bb))
4099 return FALSE;
4100
4101 /* THEN is not EXIT. */
4102 if (then_bb->index < NUM_FIXED_BLOCKS)
4103 return FALSE;
4104
4105 if (else_edge->probability)
4106 {
4107 else_prob = else_edge->probability;
4108 then_prob = REG_BR_PROB_BASE - else_prob;
4109 }
4110 else
4111 {
4112 else_prob = REG_BR_PROB_BASE / 2;
4113 then_prob = REG_BR_PROB_BASE / 2;
4114 }
4115
4116 /* ELSE is predicted or SUCC(ELSE) postdominates THEN. */
4117 if (else_prob > then_prob)
4118 ;
4119 else if (else_succ->dest->index < NUM_FIXED_BLOCKS
4120 || dominated_by_p (CDI_POST_DOMINATORS, then_bb,
4121 else_succ->dest))
4122 ;
4123 else
4124 return FALSE;
4125
4126 num_possible_if_blocks++;
4127 if (dump_file)
4128 fprintf (dump_file,
4129 "\nIF-CASE-2 found, start %d, else %d\n",
4130 test_bb->index, else_bb->index);
4131
4132 /* We're speculating from the ELSE path, so we want to make sure the
4133 cost of speculation is within reason. */
4134 if (! cheap_bb_rtx_cost_p (else_bb, else_prob,
4135 COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (else_edge->src),
4136 predictable_edge_p (else_edge)))))
4137 return FALSE;
4138
4139 /* Registers set are dead, or are predicable. */
4140 if (! dead_or_predicable (test_bb, else_bb, then_bb, else_succ, 0))
4141 return FALSE;
4142
4143 /* Conversion went ok, including moving the insns and fixing up the
4144 jump. Adjust the CFG to match. */
4145
4146 df_set_bb_dirty (test_bb);
4147 df_set_bb_dirty (then_bb);
4148 delete_basic_block (else_bb);
4149
4150 num_true_changes++;
4151 num_updated_if_blocks++;
4152
4153 /* ??? We may now fallthru from one of THEN's successors into a join
4154 block. Rerun cleanup_cfg? Examine things manually? Wait? */
4155
4156 return TRUE;
4157 }
4158
4159 /* Used by the code above to perform the actual rtl transformations.
4160 Return TRUE if successful.
4161
4162 TEST_BB is the block containing the conditional branch, and OTHER_BB
4163 is its other successor. MERGE_BB is the block containing the code to
4164 manipulate. DEST_EDGE is an edge representing a jump to the join
4165 block; after the conversion, TEST_BB should be branching to its
4166 destination. REVERSEP is true if the sense of the branch should be reversed. */
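
/* For example, find_if_case_1 above invokes this as
   dead_or_predicable (test_bb, then_bb, else_bb,
                       single_succ_edge (then_bb), 1),
   which hoists THEN's body above the branch and inverts the jump so
   that it targets THEN's old destination directly. */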
4167
4168 static int
4169 dead_or_predicable (basic_block test_bb, basic_block merge_bb,
4170 basic_block other_bb, edge dest_edge, int reversep)
4171 {
4172 basic_block new_dest = dest_edge->dest;
4173 rtx_insn *head, *end, *jump;
4174 rtx_insn *earliest = NULL;
4175 rtx old_dest;
4176 bitmap merge_set = NULL;
4177 /* Number of pending changes. */
4178 int n_validated_changes = 0;
4179 rtx new_dest_label = NULL_RTX;
4180
4181 jump = BB_END (test_bb);
4182
4183 /* Find the extent of the real code in the merge block. */
4184 head = BB_HEAD (merge_bb);
4185 end = BB_END (merge_bb);
4186
4187 while (DEBUG_INSN_P (end) && end != head)
4188 end = PREV_INSN (end);
4189
4190 /* If merge_bb ends with a tablejump, predicating/moving insns
4191 into test_bb and then deleting merge_bb will result in the jumptable
4192 that follows merge_bb being removed along with merge_bb, leaving an
4193 unresolved reference to the jumptable. */
4194 if (tablejump_p (end, NULL, NULL))
4195 return FALSE;
4196
4197 if (LABEL_P (head))
4198 head = NEXT_INSN (head);
4199 while (DEBUG_INSN_P (head) && head != end)
4200 head = NEXT_INSN (head);
4201 if (NOTE_P (head))
4202 {
4203 if (head == end)
4204 {
4205 head = end = NULL;
4206 goto no_body;
4207 }
4208 head = NEXT_INSN (head);
4209 while (DEBUG_INSN_P (head) && head != end)
4210 head = NEXT_INSN (head);
4211 }
4212
4213 if (JUMP_P (end))
4214 {
4215 if (!onlyjump_p (end))
4216 return FALSE;
4217 if (head == end)
4218 {
4219 head = end = NULL;
4220 goto no_body;
4221 }
4222 end = PREV_INSN (end);
4223 while (DEBUG_INSN_P (end) && end != head)
4224 end = PREV_INSN (end);
4225 }
4226
4227 /* Don't move frame-related insns across the conditional branch. This
4228 can lead to one of the paths of the branch having wrong unwind info. */
4229 if (epilogue_completed)
4230 {
4231 rtx_insn *insn = head;
4232 while (1)
4233 {
4234 if (INSN_P (insn) && RTX_FRAME_RELATED_P (insn))
4235 return FALSE;
4236 if (insn == end)
4237 break;
4238 insn = NEXT_INSN (insn);
4239 }
4240 }
4241
4242 /* Disable handling dead code by conditional execution if the machine needs
4243 to do anything funny with the tests, etc. */
4244 #ifndef IFCVT_MODIFY_TESTS
4245 if (targetm.have_conditional_execution ())
4246 {
4247 /* In the conditional execution case, we have things easy. We know
4248 the condition is reversible. We don't have to check life info
4249 because we're going to conditionally execute the code anyway.
4250 All that's left is making sure the insns involved can actually
4251 be predicated. */
4252
4253 rtx cond;
4254
4255 cond = cond_exec_get_condition (jump);
4256 if (! cond)
4257 return FALSE;
4258
4259 rtx note = find_reg_note (jump, REG_BR_PROB, NULL_RTX);
4260 int prob_val = (note ? XINT (note, 0) : -1);
4261
4262 if (reversep)
4263 {
4264 enum rtx_code rev = reversed_comparison_code (cond, jump);
4265 if (rev == UNKNOWN)
4266 return FALSE;
4267 cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0),
4268 XEXP (cond, 1));
4269 if (prob_val >= 0)
4270 prob_val = REG_BR_PROB_BASE - prob_val;
4271 }
4272
4273 if (cond_exec_process_insns (NULL, head, end, cond, prob_val, 0)
4274 && verify_changes (0))
4275 n_validated_changes = num_validated_changes ();
4276 else
4277 cancel_changes (0);
4278
4279 earliest = jump;
4280 }
4281 #endif
4282
4283 /* If we allocated new pseudos (e.g. in the conditional move
4284 expander called from noce_emit_cmove), we must resize the
4285 array first. */
4286 if (max_regno < max_reg_num ())
4287 max_regno = max_reg_num ();
4288
4289 /* Try the NCE path if the CE path did not result in any changes. */
4290 if (n_validated_changes == 0)
4291 {
4292 rtx cond;
4293 rtx_insn *insn;
4294 regset live;
4295 bool success;
4296
4297 /* In the non-conditional execution case, we have to verify that there
4298 are no trapping operations, no calls, no references to memory, and
4299 that any registers modified are dead at the branch site. */
4300
4301 if (!any_condjump_p (jump))
4302 return FALSE;
4303
4304 /* Find the extent of the conditional. */
4305 cond = noce_get_condition (jump, &earliest, false);
4306 if (!cond)
4307 return FALSE;
4308
4309 live = BITMAP_ALLOC (&reg_obstack);
4310 simulate_backwards_to_point (merge_bb, live, end);
4311 success = can_move_insns_across (head, end, earliest, jump,
4312 merge_bb, live,
4313 df_get_live_in (other_bb), NULL);
4314 BITMAP_FREE (live);
4315 if (!success)
4316 return FALSE;
4317
4318 /* Collect the set of registers set in MERGE_BB. */
4319 merge_set = BITMAP_ALLOC (&reg_obstack);
4320
4321 FOR_BB_INSNS (merge_bb, insn)
4322 if (NONDEBUG_INSN_P (insn))
4323 df_simulate_find_defs (insn, merge_set);
4324
4325 /* If shrink-wrapping, disable this optimization when test_bb is
4326 the first basic block and merge_bb exits. The idea is to not
4327 move code setting up a return register as that may clobber a
4328 register used to pass function parameters, which then must be
4329 saved in call-saved regs. A call-saved reg requires the
4330 prologue, killing a shrink-wrap opportunity. */
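
/* (Hypothetical illustration: if a register serves both to pass an
   argument and to return the function value, hoisting MERGE_BB's
   return-value setup above the first branch would clobber the live
   incoming argument, forcing it into a call-saved register and hence
   forcing a prologue.) */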
4331 if ((SHRINK_WRAPPING_ENABLED && !epilogue_completed)
4332 && ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == test_bb
4333 && single_succ_p (new_dest)
4334 && single_succ (new_dest) == EXIT_BLOCK_PTR_FOR_FN (cfun)
4335 && bitmap_intersect_p (df_get_live_in (new_dest), merge_set))
4336 {
4337 regset return_regs;
4338 unsigned int i;
4339
4340 return_regs = BITMAP_ALLOC (&reg_obstack);
4341
4342 /* Start off with the intersection of regs used to pass
4343 params and regs used to return values. */
4344 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4345 if (FUNCTION_ARG_REGNO_P (i)
4346 && targetm.calls.function_value_regno_p (i))
4347 bitmap_set_bit (return_regs, INCOMING_REGNO (i));
4348
4349 bitmap_and_into (return_regs,
4350 df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4351 bitmap_and_into (return_regs,
4352 df_get_live_in (EXIT_BLOCK_PTR_FOR_FN (cfun)));
4353 if (!bitmap_empty_p (return_regs))
4354 {
4355 FOR_BB_INSNS_REVERSE (new_dest, insn)
4356 if (NONDEBUG_INSN_P (insn))
4357 {
4358 df_ref def;
4359
4360 /* If this insn sets any reg in return_regs, add all
4361 reg uses to the set of regs we're interested in. */
4362 FOR_EACH_INSN_DEF (def, insn)
4363 if (bitmap_bit_p (return_regs, DF_REF_REGNO (def)))
4364 {
4365 df_simulate_uses (insn, return_regs);
4366 break;
4367 }
4368 }
4369 if (bitmap_intersect_p (merge_set, return_regs))
4370 {
4371 BITMAP_FREE (return_regs);
4372 BITMAP_FREE (merge_set);
4373 return FALSE;
4374 }
4375 }
4376 BITMAP_FREE (return_regs);
4377 }
4378 }
4379
4380 no_body:
4381 /* We don't want to use normal invert_jump or redirect_jump because
4382 we don't want delete_insn called. Also, we want to do our own
4383 change group management. */
4384
4385 old_dest = JUMP_LABEL (jump);
4386 if (other_bb != new_dest)
4387 {
4388 if (!any_condjump_p (jump))
4389 goto cancel;
4390
4391 if (JUMP_P (BB_END (dest_edge->src)))
4392 new_dest_label = JUMP_LABEL (BB_END (dest_edge->src));
4393 else if (new_dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4394 new_dest_label = ret_rtx;
4395 else
4396 new_dest_label = block_label (new_dest);
4397
4398 if (reversep
4399 ? ! invert_jump_1 (jump, new_dest_label)
4400 : ! redirect_jump_1 (jump, new_dest_label))
4401 goto cancel;
4402 }
4403
4404 if (verify_changes (n_validated_changes))
4405 confirm_change_group ();
4406 else
4407 goto cancel;
4408
4409 if (other_bb != new_dest)
4410 {
4411 redirect_jump_2 (jump, old_dest, new_dest_label, 0, reversep);
4412
4413 redirect_edge_succ (BRANCH_EDGE (test_bb), new_dest);
4414 if (reversep)
4415 {
4416 gcov_type count, probability;
4417 count = BRANCH_EDGE (test_bb)->count;
4418 BRANCH_EDGE (test_bb)->count = FALLTHRU_EDGE (test_bb)->count;
4419 FALLTHRU_EDGE (test_bb)->count = count;
4420 probability = BRANCH_EDGE (test_bb)->probability;
4421 BRANCH_EDGE (test_bb)->probability
4422 = FALLTHRU_EDGE (test_bb)->probability;
4423 FALLTHRU_EDGE (test_bb)->probability = probability;
4424 update_br_prob_note (test_bb);
4425 }
4426 }
4427
4428 /* Move the insns out of MERGE_BB to before the branch. */
4429 if (head != NULL)
4430 {
4431 rtx_insn *insn;
4432
4433 if (end == BB_END (merge_bb))
4434 BB_END (merge_bb) = PREV_INSN (head);
4435
4436 /* PR 21767: when moving insns above a conditional branch, the REG_EQUAL
4437 notes being moved might become invalid. */
4438 insn = head;
4439 do
4440 {
4441 rtx note;
4442
4443 if (! INSN_P (insn))
4444 continue;
4445 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
4446 if (! note)
4447 continue;
4448 remove_note (insn, note);
4449 } while (insn != end && (insn = NEXT_INSN (insn)));
4450
4451 /* PR46315: when moving insns above a conditional branch, the REG_EQUAL
4452 notes referring to the registers being set might become invalid. */
4453 if (merge_set)
4454 {
4455 unsigned i;
4456 bitmap_iterator bi;
4457
4458 EXECUTE_IF_SET_IN_BITMAP (merge_set, 0, i, bi)
4459 remove_reg_equal_equiv_notes_for_regno (i);
4460
4461 BITMAP_FREE (merge_set);
4462 }
4463
4464 reorder_insns (head, end, PREV_INSN (earliest));
4465 }
4466
4467 /* Remove the jump and edge if we can. */
4468 if (other_bb == new_dest)
4469 {
4470 delete_insn (jump);
4471 remove_edge (BRANCH_EDGE (test_bb));
4472 /* ??? Can't merge blocks here, as then_bb is still in use.
4473 At minimum, the merge will get done just before bb-reorder. */
4474 }
4475
4476 return TRUE;
4477
4478 cancel:
4479 cancel_changes (0);
4480
4481 if (merge_set)
4482 BITMAP_FREE (merge_set);
4483
4484 return FALSE;
4485 }
4486 \f
4487 /* Main entry point for all if-conversion. AFTER_COMBINE is true if
4488 we are after the combine pass. */
4489
4490 static void
4491 if_convert (bool after_combine)
4492 {
4493 basic_block bb;
4494 int pass;
4495
4496 if (optimize == 1)
4497 {
4498 df_live_add_problem ();
4499 df_live_set_all_dirty ();
4500 }
4501
4502 /* Record whether we are after the combine pass. */
4503 ifcvt_after_combine = after_combine;
4504 num_possible_if_blocks = 0;
4505 num_updated_if_blocks = 0;
4506 num_true_changes = 0;
4507
4508 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
4509 mark_loop_exit_edges ();
4510 loop_optimizer_finalize ();
4511 free_dominance_info (CDI_DOMINATORS);
4512
4513 /* Compute postdominators. */
4514 calculate_dominance_info (CDI_POST_DOMINATORS);
4515
4516 df_set_flags (DF_LR_RUN_DCE);
4517
4518 /* Go through each of the basic blocks looking for things to convert. If we
4519 have conditional execution, we make multiple passes to allow us to handle
4520 IF-THEN{-ELSE} blocks within other IF-THEN{-ELSE} blocks. */
4521 pass = 0;
4522 do
4523 {
4524 df_analyze ();
4525 /* Only need to do DCE on the first pass. */
4526 df_clear_flags (DF_LR_RUN_DCE);
4527 cond_exec_changed_p = FALSE;
4528 pass++;
4529
4530 #ifdef IFCVT_MULTIPLE_DUMPS
4531 if (dump_file && pass > 1)
4532 fprintf (dump_file, "\n\n========== Pass %d ==========\n", pass);
4533 #endif
4534
4535 FOR_EACH_BB_FN (bb, cfun)
4536 {
4537 basic_block new_bb;
4538 while (!df_get_bb_dirty (bb)
4539 && (new_bb = find_if_header (bb, pass)) != NULL)
4540 bb = new_bb;
4541 }
4542
4543 #ifdef IFCVT_MULTIPLE_DUMPS
4544 if (dump_file && cond_exec_changed_p)
4545 print_rtl_with_bb (dump_file, get_insns (), dump_flags);
4546 #endif
4547 }
4548 while (cond_exec_changed_p);
4549
4550 #ifdef IFCVT_MULTIPLE_DUMPS
4551 if (dump_file)
4552 fprintf (dump_file, "\n\n========== no more changes\n");
4553 #endif
4554
4555 free_dominance_info (CDI_POST_DOMINATORS);
4556
4557 if (dump_file)
4558 fflush (dump_file);
4559
4560 clear_aux_for_blocks ();
4561
4562 /* If we allocated new pseudos, we must resize the array for sched1. */
4563 if (max_regno < max_reg_num ())
4564 max_regno = max_reg_num ();
4565
4566 /* Write the final stats. */
4567 if (dump_file && num_possible_if_blocks > 0)
4568 {
4569 fprintf (dump_file,
4570 "\n%d possible IF blocks searched.\n",
4571 num_possible_if_blocks);
4572 fprintf (dump_file,
4573 "%d IF blocks converted.\n",
4574 num_updated_if_blocks);
4575 fprintf (dump_file,
4576 "%d true changes made.\n\n\n",
4577 num_true_changes);
4578 }
4579
4580 if (optimize == 1)
4581 df_remove_problem (df_live);
4582
4583 #ifdef ENABLE_CHECKING
4584 verify_flow_info ();
4585 #endif
4586 }
4587 \f
4588 /* If-conversion and CFG cleanup. */
4589 static unsigned int
4590 rest_of_handle_if_conversion (void)
4591 {
4592 if (flag_if_conversion)
4593 {
4594 if (dump_file)
4595 {
4596 dump_reg_info (dump_file);
4597 dump_flow_info (dump_file, dump_flags);
4598 }
4599 cleanup_cfg (CLEANUP_EXPENSIVE);
4600 if_convert (false);
4601 }
4602
4603 cleanup_cfg (0);
4604 return 0;
4605 }
4606
4607 namespace {
4608
4609 const pass_data pass_data_rtl_ifcvt =
4610 {
4611 RTL_PASS, /* type */
4612 "ce1", /* name */
4613 OPTGROUP_NONE, /* optinfo_flags */
4614 TV_IFCVT, /* tv_id */
4615 0, /* properties_required */
4616 0, /* properties_provided */
4617 0, /* properties_destroyed */
4618 0, /* todo_flags_start */
4619 TODO_df_finish, /* todo_flags_finish */
4620 };
4621
4622 class pass_rtl_ifcvt : public rtl_opt_pass
4623 {
4624 public:
4625 pass_rtl_ifcvt (gcc::context *ctxt)
4626 : rtl_opt_pass (pass_data_rtl_ifcvt, ctxt)
4627 {}
4628
4629 /* opt_pass methods: */
4630 virtual bool gate (function *)
4631 {
4632 return (optimize > 0) && dbg_cnt (if_conversion);
4633 }
4634
4635 virtual unsigned int execute (function *)
4636 {
4637 return rest_of_handle_if_conversion ();
4638 }
4639
4640 }; // class pass_rtl_ifcvt
4641
4642 } // anon namespace
4643
4644 rtl_opt_pass *
4645 make_pass_rtl_ifcvt (gcc::context *ctxt)
4646 {
4647 return new pass_rtl_ifcvt (ctxt);
4648 }
4649
4650
4651 /* Rerun if-conversion, as combine may have simplified things enough
4652 to now meet sequence length restrictions. */
4653
4654 namespace {
4655
4656 const pass_data pass_data_if_after_combine =
4657 {
4658 RTL_PASS, /* type */
4659 "ce2", /* name */
4660 OPTGROUP_NONE, /* optinfo_flags */
4661 TV_IFCVT, /* tv_id */
4662 0, /* properties_required */
4663 0, /* properties_provided */
4664 0, /* properties_destroyed */
4665 0, /* todo_flags_start */
4666 TODO_df_finish, /* todo_flags_finish */
4667 };
4668
4669 class pass_if_after_combine : public rtl_opt_pass
4670 {
4671 public:
4672 pass_if_after_combine (gcc::context *ctxt)
4673 : rtl_opt_pass (pass_data_if_after_combine, ctxt)
4674 {}
4675
4676 /* opt_pass methods: */
4677 virtual bool gate (function *)
4678 {
4679 return optimize > 0 && flag_if_conversion
4680 && dbg_cnt (if_after_combine);
4681 }
4682
4683 virtual unsigned int execute (function *)
4684 {
4685 if_convert (true);
4686 return 0;
4687 }
4688
4689 }; // class pass_if_after_combine
4690
4691 } // anon namespace
4692
4693 rtl_opt_pass *
4694 make_pass_if_after_combine (gcc::context *ctxt)
4695 {
4696 return new pass_if_after_combine (ctxt);
4697 }
4698
4699
4700 namespace {
4701
4702 const pass_data pass_data_if_after_reload =
4703 {
4704 RTL_PASS, /* type */
4705 "ce3", /* name */
4706 OPTGROUP_NONE, /* optinfo_flags */
4707 TV_IFCVT2, /* tv_id */
4708 0, /* properties_required */
4709 0, /* properties_provided */
4710 0, /* properties_destroyed */
4711 0, /* todo_flags_start */
4712 TODO_df_finish, /* todo_flags_finish */
4713 };
4714
4715 class pass_if_after_reload : public rtl_opt_pass
4716 {
4717 public:
4718 pass_if_after_reload (gcc::context *ctxt)
4719 : rtl_opt_pass (pass_data_if_after_reload, ctxt)
4720 {}
4721
4722 /* opt_pass methods: */
4723 virtual bool gate (function *)
4724 {
4725 return optimize > 0 && flag_if_conversion2
4726 && dbg_cnt (if_after_reload);
4727 }
4728
4729 virtual unsigned int execute (function *)
4730 {
4731 if_convert (true);
4732 return 0;
4733 }
4734
4735 }; // class pass_if_after_reload
4736
4737 } // anon namespace
4738
4739 rtl_opt_pass *
4740 make_pass_if_after_reload (gcc::context *ctxt)
4741 {
4742 return new pass_if_after_reload (ctxt);
4743 }