/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.

   LOG_LINKS does not have links for uses of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
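
/* As a small illustration (not taken from any particular target),
   combining the adjacent pair

	(set (reg 100) (plus (reg 99) (const_int 4)))
	(set (mem (reg 100)) (reg 101))

   substitutes the value of reg 100 into the second insn, yielding the
   single insn

	(set (mem (plus (reg 99) (const_int 4))) (reg 101))

   which is installed only if the target's machine description recognizes
   it (here, a register-plus-offset addressing mode).  */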
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				other expression

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};


static vec<reg_stat_type> reg_stat;
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;
/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;


/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;
/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}
#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
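
/* A minimal usage sketch (hypothetical helper name, not part of the
   original sources): walking the insns whose SETs feed INSN looks like

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  process (link->insn, link->regno);

   where each LINK names a feeding insn and the register it sets.  */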
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;
/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *,
			      rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */

static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& !read_modify_subreg_p (SET_DEST (x))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;
	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
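
/* A minimal usage sketch (hypothetical variable names, not from the
   original sources): to tentatively rewrite the source of a SET while
   keeping the change undoable, one writes

	SUBST (SET_SRC (pat), newsrc);

   and later calls undo_all to revert, or undo_commit to keep, every
   change recorded in undobuf since the last attempt.  */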
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;

  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
				  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear the next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
	 be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
	continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subregs of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already have been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun,
						"insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
					   BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (mode);
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	}
    }
}
1805 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1806 optionally insns that were previously combined into I3 or that will be
1807 combined into the merger of INSN and I3. The order is PRED, PRED2,
1808 INSN, SUCC, SUCC2, I3.
1810 Return 0 if the combination is not allowed for any reason.
1812 If the combination is allowed, *PDEST will be set to the single
1813 destination of INSN and *PSRC to the single source, and this function
1817 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1818 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1819 rtx
*pdest
, rtx
*psrc
)
1826 bool all_adjacent
= true;
1827 int (*is_volatile_p
) (const_rtx
);
1833 if (next_active_insn (succ2
) != i3
)
1834 all_adjacent
= false;
1835 if (next_active_insn (succ
) != succ2
)
1836 all_adjacent
= false;
1838 else if (next_active_insn (succ
) != i3
)
1839 all_adjacent
= false;
1840 if (next_active_insn (insn
) != succ
)
1841 all_adjacent
= false;
1843 else if (next_active_insn (insn
) != i3
)
1844 all_adjacent
= false;
1846 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1847 or a PARALLEL consisting of such a SET and CLOBBERs.
1849 If INSN has CLOBBER parallel parts, ignore them for our processing.
1850 By definition, these happen during the execution of the insn. When it
1851 is merged with another insn, all bets are off. If they are, in fact,
1852 needed and aren't also supplied in I3, they may be added by
1853 recog_for_combine. Otherwise, it won't match.
1855 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1858 Get the source and destination of INSN. If more than one, can't
1861 if (GET_CODE (PATTERN (insn
)) == SET
)
1862 set
= PATTERN (insn
);
1863 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1864 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1866 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1868 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1870 switch (GET_CODE (elt
))
1872 /* This is important to combine floating point insns
1873 for the SH4 port. */
1875 /* Combining an isolated USE doesn't make sense.
1876 We depend here on combinable_i3pat to reject them. */
1877 /* The code below this loop only verifies that the inputs of
1878 the SET in INSN do not change. We call reg_set_between_p
1879 to verify that the REG in the USE does not change between
1881 If the USE in INSN was for a pseudo register, the matching
1882 insn pattern will likely match any register; combining this
1883 with any other USE would only be safe if we knew that the
1884 used registers have identical values, or if there was
1885 something to tell them apart, e.g. different modes. For
1886 now, we forgo such complicated tests and simply disallow
1887 combining of USES of pseudo registers with any other USE. */
1888 if (REG_P (XEXP (elt
, 0))
1889 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1891 rtx i3pat
= PATTERN (i3
);
1892 int i
= XVECLEN (i3pat
, 0) - 1;
1893 unsigned int regno
= REGNO (XEXP (elt
, 0));
1897 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1899 if (GET_CODE (i3elt
) == USE
1900 && REG_P (XEXP (i3elt
, 0))
1901 && (REGNO (XEXP (i3elt
, 0)) == regno
1902 ? reg_set_between_p (XEXP (elt
, 0),
1903 PREV_INSN (insn
), i3
)
1904 : regno
>= FIRST_PSEUDO_REGISTER
))
1911 /* We can ignore CLOBBERs. */
1916 /* Ignore SETs whose result isn't used but not those that
1917 have side-effects. */
1918 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1919 && insn_nothrow_p (insn
)
1920 && !side_effects_p (elt
))
1923 /* If we have already found a SET, this is a second one and
1924 so we cannot combine with this insn. */
1932 /* Anything else means we can't combine. */
1938 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1939 so don't do anything with it. */
1940 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1949 /* The simplification in expand_field_assignment may call back to
1950 get_last_value, so set safe guard here. */
1951 subst_low_luid
= DF_INSN_LUID (insn
);
1953 set
= expand_field_assignment (set
);
1954 src
= SET_SRC (set
), dest
= SET_DEST (set
);
1956 /* Do not eliminate user-specified register if it is in an
1957 asm input because we may break the register asm usage defined
1958 in GCC manual if allow to do so.
1959 Be aware that this may cover more cases than we expect but this
1960 should be harmless. */
1961 if (REG_P (dest
) && REG_USERVAR_P (dest
) && HARD_REGISTER_P (dest
)
1962 && extract_asm_operands (PATTERN (i3
)))
  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
	  && ((succ2
	       && (reg_used_between_p (dest, succ2, i3)
		   || reg_used_between_p (dest, succ, succ2)))
	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
	      || (succ
		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
		     that case SUCC is not in the insn stream, so use SUCC2
		     instead for this test.  */
		  && reg_used_between_p (dest, insn,
					 succ2
					 && INSN_UID (succ) == INSN_UID (succ2)
					 ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && modified_between_p (src, insn, i3))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;
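  /* Illustrative example (not from the original sources): if INSN is
       (set (reg:SI 100) (mem:SI (reg:SI 101)))
     and a CALL_INSN lies between INSN and I3, the final test above rejects
     the combination, because moving the non-constant load past the call
     could change which register lifetimes cross the call.  */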
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.  */
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && !targetm.hard_regno_mode_ok (REGNO (src),
						  GET_MODE (src)))))
	return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;
  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
	{
	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	  if (!REG_P (reg)
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
	      return 0;
	}
  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
	return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
	return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;
  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
		  ? volatile_refs_p
		  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;
  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (JUMP_P (i3)
	      || reg_used_between_p (XEXP (link, 0), insn, i3)
	      || (pred != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
	      || (pred2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
	      || (succ != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
	      || (succ2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
	return 0;
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
	  && ! all_adjacent)
	return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

	 (set (reg:DI 101) (reg:DI 100))
	 (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
		    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */
static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = x;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
	     || GET_CODE (inner_dest) == SUBREG
	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
	inner_dest = XEXP (inner_dest, 0);
      /* Check for the case where I3 modifies its output, as discussed
	 above.  We don't want to prevent pseudos from being combined
	 into the address of a MEM, so only prevent the combination if
	 i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
	   (!MEM_P (inner_dest)
	    || rtx_equal_p (i2dest, inner_dest)
	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

	  /* This is the same test done in can_combine_p except we can't test
	     all_adjacent; we don't have to, since this instruction will stay
	     in place, thus we are not considering increasing the lifetime of
	     INNER_DEST.

	     Also, if this insn sets a function argument, combining it with
	     something that might need a spill could clobber a previous
	     function argument; the all_adjacent test in can_combine_p also
	     checks this; here, we do a more specific test for this case.  */

	  || (REG_P (inner_dest)
	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
	      && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
					      GET_MODE (inner_dest)))
	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
	return 0;
      /* If DEST is used in I3, it is being killed in this insn, so
	 record that for later.  We have to consider paradoxical
	 subregs here, since they kill the whole register, but we
	 ignore partial subregs, STRICT_LOW_PART, etc.
	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
	 STACK_POINTER_REGNUM, since these are always considered to be
	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
	subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
	  && REG_P (subdest)
	  && reg_referenced_p (subdest, PATTERN (i3))
	  && REGNO (subdest) != FRAME_POINTER_REGNUM
	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
	      || (REGNO (subdest) != ARG_POINTER_REGNUM
		  || ! fixed_regs[REGNO (subdest)]))
	  && REGNO (subdest) != STACK_POINTER_REGNUM)
	{
	  if (*pi3dest_killed)
	    return 0;

	  *pi3dest_killed = subdest;
	}
    }
  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
				i1_not_in_src, i0_not_in_src, pi3dest_killed))
	  return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   or division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
	return contains_muldiv (XEXP (x, 0))
	  || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
	return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
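/* Illustrative examples (not from the original sources): contains_muldiv
   returns 1 for
     (plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102))
   but 0 for
     (plus:SI (mult:SI (reg:SI 100) (const_int 8)) (reg:SI 102)),
   since multiplying by a power of two is expected to become a shift.  */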
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (!NONDEBUG_INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
	  || (HARD_REGISTER_P (dest)
	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS
						 (REGNO (dest))))))
    return 1;

  return 0;
}
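/* Illustrative example (not from the original sources): on a target where
   the class of hard register 0 is likely spilled and that register is not
   fixed, a copy such as (set (reg:SI 100) (reg:SI 0)) makes
   cant_combine_insn_p return 1, while a pseudo-to-pseudo copy does not.  */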
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
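/* Illustrative walk-through (not from the original sources): with
   info->regno == 10 and info->nregs == 4, a store covering hard regs
   11..12 gives new_mask == 0b11, shifted left by (11 - 10) to 0b110,
   so bits 1 and 2 of info->mask (regs 11 and 12) are cleared.  */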
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs--;
  do
    {
      if ((mask & 1 << nregs)
	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
	return 1;
    } while (nregs--);
  return 0;
}
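/* Illustrative example (not from the original sources): a value returned
   in hard regs 0 and 1 may be copied out by two insns,
     (set (reg:SF 100) (reg:SF 0))  and  (set (reg:SF 101) (reg:SF 1));
   while combining into the second copy, reg 1 is still a live part of the
   return value, which the mask computed above detects.  */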
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  /* Don't change between modes with different underlying register sizes,
     since this could lead to invalid subregs.  */
  if (REGMODE_NATURAL_SIZE (mode)
      != REGMODE_NATURAL_SIZE (GET_MODE (x)))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (targetm.hard_regno_mode_ok (regno, mode)
	    && REG_NREGS (x) >= hard_regno_nregs (regno, mode));

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
	  && REG_N_SETS (regno) == 1
	  && !added_sets
	  && !REG_USERVAR_P (x));
}
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
	 && SUBREG_REG (x) == reg
	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
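/* Illustrative examples (not from the original sources): with
   REG == (reg:DI 100), a destination of (subreg:SI (reg:DI 100) 0) or
   (strict_low_part (subreg:SI (reg:DI 100) 0)) is a subword of REG,
   while (reg:DI 100) itself or (subreg:SI (reg:DI 101) 0) is not.  */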
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
	if (BARRIER_P (insn))
	  {
	    if (PREV_INSN (insn))
	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	    else
	      BB_FOOTER (bb) = NEXT_INSN (insn);
	    if (NEXT_INSN (insn))
	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	  }
	else if (LABEL_P (insn))
	  break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
	|| XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
      return false;

  return true;
}
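/* Illustrative example (not from the original sources):
     (parallel [(set (reg:SI 100) (reg:SI 101))
		(set (reg:CC 17) (compare:CC (reg:SI 101) (const_int 0)))
		(clobber (scratch:SI))])
   satisfies is_parallel_of_n_reg_sets (pat, 2).  */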
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
	return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
	  return false;
    }

  return true;
}
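/* Illustrative example (not from the original sources):
     (parallel [(set (reg:SI 100) (reg:SI 101))
		(set (reg:SI 102) (plus:SI (reg:SI 100) (const_int 1)))])
   cannot be split in order, because the second SET reads reg 100,
   which the first SET writes.  */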
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that has
   been I3 passed to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx new_other_notes;
  int i;
  scalar_int_mode dest_mode, temp_mode;
  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;
  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
	return 0;

      for (i = 0; i < 4; i++)
	{
	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
	  rtx set = single_set (insn);
	  rtx src;
	  if (!set)
	    continue;
	  src = SET_SRC (set);
	  if (CONSTANT_P (src))
	    {
	      ngood += 2;
	      break;
	    }
	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
	    ngood++;
	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
	    nshift++;
	}
      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
	 are likely manipulating its value.  Ideally we'll be able to combine
	 all four insns into a bitfield insertion of some kind.

	 Note the source in I0 might be inside a sign/zero extension and the
	 memory modes in I0 and I3 might be different.  So extract the address
	 from the destination of I3 and search for it in the source of I0.

	 In the event that there's a match but the source/dest do not actually
	 refer to the same memory, the worst that happens is we try some
	 combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
	  /* Ensure the source of SET0 is a MEM, possibly buried inside
	     an extension.  */
	  && (GET_CODE (SET_SRC (set0)) == MEM
	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
	  && (set3 = single_set (i3))
	  /* Ensure the destination of SET3 is a MEM.  */
	  && GET_CODE (SET_DEST (set3)) == MEM
	  /* Would it be better to extract the base address for the MEM
	     in SET3 and look for that?  I don't have cases where it matters
	     but I could envision such cases.  */
	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
	ngood += 2;

      if (ngood < 2 && nshift < 2)
	return 0;
    }
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
	fprintf (dump_file, "\nTrying %d -> %d:\n",
		 INSN_UID (i2), INSN_UID (i3));

      if (i0)
	dump_insn_slim (dump_file, i0);
      if (i1)
	dump_insn_slim (dump_file, i1);
      dump_insn_slim (dump_file, i2);
      dump_insn_slim (dump_file, i3);
    }
  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  added_notes_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
	 below would need to check what is inside (and reg_overlap_mentioned_p
	 doesn't support those codes anyway).  Don't allow those destinations;
	 the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
				    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
	 which we are going to substitute into one output of I2,
	 is not used within another output of I2.  We must avoid making this:
	 (parallel [(set (mem (reg 69)) ...)
		    (set (reg 69) ...)])
	 which is not well-defined as to order of actions.
	 (Besides, reload can't handle output reloads for this.)

	 The problem can also happen if the dest of I3 is a memory ref,
	 if another dest in I2 is an indirect memory ref.

	 Neither can this PARALLEL be an asm.  We do not allow combining
	 that usually (see can_combine_p), so do not here either.  */
      bool ok = true;
      for (i = 0; ok && i < XVECLEN (p2, 0); i++)
	{
	  if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
	      && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
					  SET_DEST (XVECEXP (p2, 0, i))))
	    ok = false;
	  else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
		   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
	    ok = false;
	}

      if (ok)
	for (i = 0; i < XVECLEN (p2, 0); i++)
	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
	    {
	      subst_insn = i3;
	      subst_low_luid = DF_INSN_LUID (i2);

	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
	      i2src = SET_SRC (XVECEXP (p2, 0, i));
	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
	      i2dest_killed = dead_or_set_p (i2, i2dest);

	      /* Replace the dest in I2 with our dest and make the resulting
		 insn the new pattern for I3.  Then skip to where we validate
		 the pattern.  Everything was set up above.  */
	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
	      newpat = p2;
	      i3_subst_into_i2 = 1;
	      goto validate_replacement;
	    }
    }
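  /* Illustrative example (not from the original sources): for
       I2: (parallel [(set (reg:SI 100) (div:SI (reg 98) (reg 99)))
		      (set (reg:SI 101) (mod:SI (reg 98) (reg 99)))])
       I3: (set (mem:SI (reg 102)) (reg:SI 101))
     the code above replaces reg 101 in I2's second output with the MEM
     destination of I3, so the remainder is computed directly into the
     structure slot.  */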
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      rtx temp_dest = SET_DEST (temp_expr);
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  if (CONST_INT_P (XEXP (dest, 1))
	      && CONST_INT_P (XEXP (dest, 2))
	      && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
					 &dest_mode))
	    {
	      width = INTVAL (XEXP (dest, 1));
	      offset = INTVAL (XEXP (dest, 2));
	      dest = XEXP (dest, 0);
	      if (BITS_BIG_ENDIAN)
		offset = GET_MODE_PRECISION (dest_mode) - width - offset;
	    }
	}
      else
	{
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    dest = XEXP (dest, 0);
	  if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
	    {
	      width = GET_MODE_PRECISION (dest_mode);
	      offset = 0;
	    }
	}

      if (offset >= 0)
	{
	  /* If this is the low part, we're done.  */
	  if (subreg_lowpart_p (dest))
	    ;
	  /* Handle the case where inner is twice the size of outer.  */
	  else if (GET_MODE_PRECISION (temp_mode)
		   == 2 * GET_MODE_PRECISION (dest_mode))
	    offset += GET_MODE_PRECISION (dest_mode);
	  /* Otherwise give up for now.  */
	  else
	    offset = -1;
	}

      if (offset >= 0)
	{
	  rtx inner = SET_SRC (PATTERN (i3));
	  rtx outer = SET_SRC (temp_expr);

	  wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
				   rtx_mode_t (inner, dest_mode),
				   offset, width);

	  subst_insn = i3;
	  subst_low_luid = DF_INSN_LUID (i2);
	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
	  i2dest = temp_dest;
	  i2dest_killed = dead_or_set_p (i2, i2dest);

	  /* Replace the source in I2 with the new constant and make the
	     resulting insn the new pattern for I3.  Then skip to where we
	     validate the pattern.  Everything was set up above.  */
	  SUBST (SET_SRC (temp_expr),
		 immed_wide_int_const (o, temp_mode));

	  newpat = PATTERN (i2);

	  /* The dest of I3 has been replaced with the dest of I2.  */
	  changed_i3_dest = 1;
	  goto validate_replacement;
	}
    }
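  /* Illustrative example (not from the original sources): if I2 is
       (set (reg:DI 100) (const_int 0))
     and I3 is
       (set (subreg:SI (reg:DI 100) 0) (const_int 42)),
     wi::insert above merges the two constants, and I3 effectively becomes
       (set (reg:DI 100) (const_int 42)).  */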
  /* If we have no I1 and I2 looks like:
	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
		   (set Y OP)])
     make up a dummy I1 that is
	(set Y OP)
     and change I2 to be
	(set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
	  == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
	     SET_DEST (PATTERN (i1)));
      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }
  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the former I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
      && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
  /* Verify that I2 and maybe I1 and I0 can be combined into I3.  */
  if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
    {
      if (dump_file)
	fprintf (dump_file, "Can't combine i2 into i3\n");
      undo_all ();
      return 0;
    }
  if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
    {
      if (dump_file)
	fprintf (dump_file, "Can't combine i1 into i3\n");
      undo_all ();
      return 0;
    }
  if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
    {
      if (dump_file)
	fprintf (dump_file, "Can't combine i0 into i3\n");
      undo_all ();
      return 0;
    }
3061 cases. Knowing this will help in register status updating below. */
3062 i2dest_in_i2src
= reg_overlap_mentioned_p (i2dest
, i2src
);
3063 i1dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i1dest
, i1src
);
3064 i2dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i2dest
, i1src
);
3065 i0dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i0dest
, i0src
);
3066 i1dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i1dest
, i0src
);
3067 i2dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i2dest
, i0src
);
3068 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
3069 i1dest_killed
= i1
&& dead_or_set_p (i1
, i1dest
);
3070 i0dest_killed
= i0
&& dead_or_set_p (i0
, i0dest
);
  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
			     && reg_overlap_mentioned_p (i0dest, i2src))));
  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
			  &i3dest_killed))
    {
      undo_all ();
      return 0;
    }
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;
  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like
	mov r3,(r3)+
     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
	&& REG_P (SET_SRC (PATTERN (i3)))
	&& MEM_P (SET_DEST (PATTERN (i3)))
	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
  if (AUTO_INC_DEC)
    for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
	      || (i1 != 0
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
	{
	  undo_all ();
	  return 0;
	}
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
		     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
		     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			 && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;
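  /* Illustrative example (not from the original sources): if I1 is
     (set (reg:SI 100) ...), I1 feeds I2, and reg 100 neither dies nor is
     set again in I2 or I3, then added_sets_1 is nonzero and I1's SET must
     be carried along in the merged pattern built below.  */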
  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }
  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
	i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
	i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
	i0pat = copy_rtx (PATTERN (i0));
    }
  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;
  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
      scalar_int_mode mode;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
					    &cc_use_insn)))
	{
	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
	  if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
	    compare_code = simplify_compare_const (compare_code, mode,
						   &op0, &op1);
	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
	}
      /* Do the rest only if op1 is const0_rtx, which may be the
	 result of simplification.  */
      if (op1 == const0_rtx)
	{
	  /* If a single use of the CC is found, prepare to modify it
	     when SELECT_CC_MODE returns a new CC-class mode, or when
	     the above simplify_compare_const() returned a new comparison
	     operator.  undobuf.other_insn is assigned the CC use insn
	     when modifying it.  */
	  if (cc_use_loc)
	    {
#ifdef SELECT_CC_MODE
	      machine_mode new_mode
		= SELECT_CC_MODE (compare_code, op0, op1);
	      if (new_mode != orig_compare_mode
		  && can_change_dest_mode (SET_DEST (newpat),
					   added_sets_2, new_mode))
		{
		  unsigned int regno = REGNO (newpat_dest);
		  compare_mode = new_mode;
		  if (regno < FIRST_PSEUDO_REGISTER)
		    newpat_dest = gen_rtx_REG (compare_mode, regno);
		  else
		    {
		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		      newpat_dest = regno_reg_rtx[regno];
		    }
		}
#endif
	      /* Cases for modifying the CC-using comparison.  */
	      if (compare_code != orig_compare_code
		  /* ??? Do we need to verify the zero rtx?  */
		  && XEXP (*cc_use_loc, 1) == const0_rtx)
		{
		  /* Replace cc_use_loc with entire new RTX.  */
		  SUBST (*cc_use_loc,
			 gen_rtx_fmt_ee (compare_code, compare_mode,
					 newpat_dest, const0_rtx));
		  undobuf.other_insn = cc_use_insn;
		}
	      else if (compare_mode != orig_compare_mode)
		{
		  /* Just replace the CC reg with a new mode.  */
		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
		  undobuf.other_insn = cc_use_insn;
		}
	    }

	  /* Now we modify the current newpat:
	     First, SET_DEST(newpat) is updated if the CC mode has been
	     altered.  For targets without SELECT_CC_MODE, this should be
	     optimized away.  */
	  if (compare_mode != orig_compare_mode)
	    SUBST (SET_DEST (newpat), newpat_dest);
	  /* This is always done to propagate i2src into newpat.  */
	  SUBST (SET_SRC (newpat),
		 gen_rtx_COMPARE (compare_mode, op0, op1));
	  /* Create new version of i2pat if needed; the below PARALLEL
	     creation needs this to work correctly.  */
	  if (! rtx_equal_p (i2src, op0))
	    i2pat = gen_rtx_SET (i2dest, op0);
	  i2_is_used = 1;
	}
    }
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
	 an unneeded operation, such as a ZERO_EXTEND of something
	 that is known to have the high part zero.  Handle that case
	 by letting subst look at the inner insns.

	 Another way to do this would be to have a function that tries
	 to simplify a single insn instead of merging two or more
	 insns.  We don't do this because of the potential of infinite
	 loops and because of the potential extra memory required.
	 However, doing it the way we are is a bit of a kludge and
	 doesn't catch all cases.

	 But only do this if -fexpensive-optimizations since it slows
	 things down and doesn't usually win.

	 This is not done in the COMPARE case above because the
	 unmodified I2PAT is used in the PARALLEL and so a pattern
	 with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
	{
	  /* Pass pc_rtx so no substitutions are done, just
	     simplifications.  */
	  if (i1)
	    {
	      subst_low_luid = DF_INSN_LUID (i1);
	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
	    }

	  subst_low_luid = DF_INSN_LUID (i2);
	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
	}

      n_occurrences = 0;		/* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);
      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
	 copy of I2SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I1SRC for I1DEST
	 later.  Likewise if I0 feeds into I2, either directly or indirectly
	 through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
		      (i1_feeds_i2_n && i1dest_in_i1src)
		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			  && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Check that an autoincrement side-effect on I1 has not been lost.
	 This happens if I1DEST is mentioned in I2 and dies there, and
	 has disappeared from the new pattern.  */
      if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	   && i1_feeds_i2_n
	   && dead_or_set_p (i2, i1dest)
	   && !reg_overlap_mentioned_p (i1dest, newpat))
	  /* Before we can do this substitution, we must redo the test done
	     above (see detailed comments there) that ensures I1DEST isn't
	     mentioned in any SETs in NEWPAT that are field assignments.  */
	  || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
	 for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
	i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
	 copy of I1SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I0SRC for I0DEST
	 later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
		      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	   && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
	       || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
	   && !reg_overlap_mentioned_p (i0dest, newpat))
	  || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      /* If the following substitution will modify I0SRC, make a copy of it
	 for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
	i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
			   || (i0_feeds_i2_n)))
	i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
	      > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	  && (n_occurrences + added_sets_0
	      + (added_sets_1 && i0_feeds_i1_n)
	      + (added_sets_2 && i0_feeds_i2_n)
	      > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
	 at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
	  && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;

      if (GET_CODE (newpat) == PARALLEL)
	{
	  rtvec old = XVEC (newpat, 0);
	  total_sets = XVECLEN (newpat, 0) + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
		  sizeof (old->elem[0]) * old->num_elem);
	}
      else
	{
	  rtx old = newpat;
	  total_sets = 1 + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  XVECEXP (newpat, 0, 0) = old;
	}

      if (added_sets_0)
	XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
	{
	  rtx t = i1pat;
	  if (i0_feeds_i1_n)
	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
      if (added_sets_2)
	{
	  rtx t = i2pat;
	  if (i1_feeds_i2_n)
	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
		       i0_feeds_i1_n && i0dest_in_i0src);
	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
    }
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }

  /* We have recognized nothing yet.  */
  insn_code_number = -1;
  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.

     If the remaining SET came from I2 its destination should not be used
     between I2 and I3.  See PR82024.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
	   || (GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
	  && insn_nothrow_p (i3)
	  && !side_effects_p (SET_SRC (set1)))
	{
	  newpat = set0;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}

      else if (((REG_P (SET_DEST (set0))
		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
		|| (GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i3, REG_UNUSED,
				      SUBREG_REG (SET_DEST (set0)))))
	       && insn_nothrow_p (i3)
	       && !side_effects_p (SET_SRC (set0)))
	{
	  rtx dest = SET_DEST (set1);
	  if (GET_CODE (dest) == SUBREG)
	    dest = SUBREG_REG (dest);
	  if (!reg_used_between_p (dest, i2, i3))
	    {
	      newpat = set1;
	      insn_code_number = recog_for_combine (&newpat, i3,
						    &new_i3_notes);

	      if (insn_code_number >= 0)
		changed_i3_dest = 1;
	    }
	}

      if (insn_code_number < 0)
	newpat = oldpat;
    }
  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, *split;
      rtx_insn *m_split_insn;
      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
	 use I2DEST as a scratch register will help.  In the latter case,
	 convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split_insn = combine_split_insns (newpat, i3);

      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
	     (temporarily, until we are committed to this instruction
	     combination) does not work: for example, any call to nonzero_bits
	     on the register (from a splitter in the MD file, for example)
	     will get the old information, which is invalid.

	     Since nowadays we can create registers during combine just fine,
	     we should just create a new one here, not reuse i2dest.  */

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split_insn = combine_split_insns (parallel, i3);
	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split_insn == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split_insn = combine_split_insns (parallel, i3);

	      if (m_split_insn == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }

	  i2scratch = m_split_insn != 0;
	}
      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split_insn = combine_split_insns (parallel, i3);
	}

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	{
	  rtx m_split_pat = PATTERN (m_split_insn);
	  insn_code_number = recog_for_combine (&m_split_pat, i3,
						&new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split_pat;
	}
3699 else if (m_split_insn
&& NEXT_INSN (NEXT_INSN (m_split_insn
)) == NULL_RTX
3700 && (next_nonnote_nondebug_insn (i2
) == i3
3701 || !modified_between_p (PATTERN (m_split_insn
), i2
, i3
)))
3704 rtx newi3pat
= PATTERN (NEXT_INSN (m_split_insn
));
3705 newi2pat
= PATTERN (m_split_insn
);
3707 i3set
= single_set (NEXT_INSN (m_split_insn
));
3708 i2set
= single_set (m_split_insn
);
3710 i2_code_number
= recog_for_combine (&newi2pat
, i2
, &new_i2_notes
);
3712 /* If I2 or I3 has multiple SETs, we won't know how to track
3713 register status, so don't use these insns. If I2's destination
3714 is used between I2 and I3, we also can't use these insns. */
3716 if (i2_code_number
>= 0 && i2set
&& i3set
3717 && (next_nonnote_nondebug_insn (i2
) == i3
3718 || ! reg_used_between_p (SET_DEST (i2set
), i2
, i3
)))
3719 insn_code_number
= recog_for_combine (&newi3pat
, i3
,
3721 if (insn_code_number
>= 0)
3724 /* It is possible that both insns now set the destination of I3.
3725 If so, we must show an extra use of it. */
3727 if (insn_code_number
>= 0)
3729 rtx new_i3_dest
= SET_DEST (i3set
);
3730 rtx new_i2_dest
= SET_DEST (i2set
);
3732 while (GET_CODE (new_i3_dest
) == ZERO_EXTRACT
3733 || GET_CODE (new_i3_dest
) == STRICT_LOW_PART
3734 || GET_CODE (new_i3_dest
) == SUBREG
)
3735 new_i3_dest
= XEXP (new_i3_dest
, 0);
3737 while (GET_CODE (new_i2_dest
) == ZERO_EXTRACT
3738 || GET_CODE (new_i2_dest
) == STRICT_LOW_PART
3739 || GET_CODE (new_i2_dest
) == SUBREG
)
3740 new_i2_dest
= XEXP (new_i2_dest
, 0);
3742 if (REG_P (new_i3_dest
)
3743 && REG_P (new_i2_dest
)
3744 && REGNO (new_i3_dest
) == REGNO (new_i2_dest
)
3745 && REGNO (new_i2_dest
) < reg_n_sets_max
)
3746 INC_REG_N_SETS (REGNO (new_i2_dest
), 1);
3750 /* If we can split it and use I2DEST, go ahead and see if that
3751 helps things be recognized. Verify that none of the registers
3752 are set between I2 and I3. */
3753 if (insn_code_number
< 0
3754 && (split
= find_split_point (&newpat
, i3
, false)) != 0
3755 && (!HAVE_cc0
|| REG_P (i2dest
))
3756 /* We need I2DEST in the proper mode. If it is a hard register
3757 or the only use of a pseudo, we can change its mode.
3758 Make sure we don't change a hard register to have a mode that
3759 isn't valid for it, or change the number of registers. */
3760 && (GET_MODE (*split
) == GET_MODE (i2dest
)
3761 || GET_MODE (*split
) == VOIDmode
3762 || can_change_dest_mode (i2dest
, added_sets_2
,
3764 && (next_nonnote_nondebug_insn (i2
) == i3
3765 || !modified_between_p (*split
, i2
, i3
))
3766 /* We can't overwrite I2DEST if its value is still used by
3768 && ! reg_referenced_p (i2dest
, newpat
))
3770 rtx newdest
= i2dest
;
3771 enum rtx_code split_code
= GET_CODE (*split
);
3772 machine_mode split_mode
= GET_MODE (*split
);
3773 bool subst_done
= false;
3774 newi2pat
= NULL_RTX
;
3778 /* *SPLIT may be part of I2SRC, so make sure we have the
3779 original expression around for later debug processing.
3780 We should not need I2SRC any more in other cases. */
3781 if (MAY_HAVE_DEBUG_BIND_INSNS
)
3782 i2src
= copy_rtx (i2src
);
3786 /* Get NEWDEST as a register in the proper mode. We have already
3787 validated that we can do this. */
3788 if (GET_MODE (i2dest
) != split_mode
&& split_mode
!= VOIDmode
)
3790 if (REGNO (i2dest
) < FIRST_PSEUDO_REGISTER
)
3791 newdest
= gen_rtx_REG (split_mode
, REGNO (i2dest
));
3794 SUBST_MODE (regno_reg_rtx
[REGNO (i2dest
)], split_mode
);
3795 newdest
= regno_reg_rtx
[REGNO (i2dest
)];
3799 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3800 an ASHIFT. This can occur if it was inside a PLUS and hence
3801 appeared to be a memory address. This is a kludge. */
3802 if (split_code
== MULT
3803 && CONST_INT_P (XEXP (*split
, 1))
3804 && INTVAL (XEXP (*split
, 1)) > 0
3805 && (i
= exact_log2 (UINTVAL (XEXP (*split
, 1)))) >= 0)
3807 rtx i_rtx
= gen_int_shift_amount (split_mode
, i
);
3808 SUBST (*split
, gen_rtx_ASHIFT (split_mode
,
3809 XEXP (*split
, 0), i_rtx
));
3810 /* Update split_code because we may not have a multiply
3812 split_code
= GET_CODE (*split
);
3815 /* Similarly for (plus (mult FOO (const_int pow2))). */
3816 if (split_code
== PLUS
3817 && GET_CODE (XEXP (*split
, 0)) == MULT
3818 && CONST_INT_P (XEXP (XEXP (*split
, 0), 1))
3819 && INTVAL (XEXP (XEXP (*split
, 0), 1)) > 0
3820 && (i
= exact_log2 (UINTVAL (XEXP (XEXP (*split
, 0), 1)))) >= 0)
3822 rtx nsplit
= XEXP (*split
, 0);
3823 rtx i_rtx
= gen_int_shift_amount (GET_MODE (nsplit
), i
);
3824 SUBST (XEXP (*split
, 0), gen_rtx_ASHIFT (GET_MODE (nsplit
),
3827 /* Update split_code because we may not have a multiply
3829 split_code
= GET_CODE (*split
);
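          /* A worked illustration (register numbers are arbitrary and not
             from the original sources): a split point such as

               (mult:SI (reg:SI 100) (const_int 8))

             becomes

               (ashift:SI (reg:SI 100) (const_int 3))

             because exact_log2 (8) == 3.  A shift pattern is far more
             likely to be recognized by a backend than a bare MULT that
             only looked like a multiply because it sat inside an
             address.  */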
3832 #ifdef INSN_SCHEDULING
3833           /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3834              be written as a ZERO_EXTEND.  */
3835           if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3837               /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3838                  what it really is.  */
3839               if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3841                 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3842                                                     SUBREG_REG (*split)));
3844                 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3845                                                     SUBREG_REG (*split)));
3849           /* Attempt to split binary operators using arithmetic identities.  */
3850           if (BINARY_P (SET_SRC (newpat))
3851               && split_mode == GET_MODE (SET_SRC (newpat))
3852               && ! side_effects_p (SET_SRC (newpat)))
3854               rtx setsrc = SET_SRC (newpat);
3855               machine_mode mode = GET_MODE (setsrc);
3856               enum rtx_code code = GET_CODE (setsrc);
3857               rtx src_op0 = XEXP (setsrc, 0);
3858               rtx src_op1 = XEXP (setsrc, 1);
3860               /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
3861               if (rtx_equal_p (src_op0, src_op1))
3863                   newi2pat = gen_rtx_SET (newdest, src_op0);
3864                   SUBST (XEXP (setsrc, 0), newdest);
3865                   SUBST (XEXP (setsrc, 1), newdest);
3868               /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
3869               else if ((code == PLUS || code == MULT)
3870                        && GET_CODE (src_op0) == code
3871                        && GET_CODE (XEXP (src_op0, 0)) == code
3872                        && (INTEGRAL_MODE_P (mode)
3873                            || (FLOAT_MODE_P (mode)
3874                                && flag_unsafe_math_optimizations)))
3876                   rtx p = XEXP (XEXP (src_op0, 0), 0);
3877                   rtx q = XEXP (XEXP (src_op0, 0), 1);
3878                   rtx r = XEXP (src_op0, 1);
3881                   /* Split both "((X op Y) op X) op Y" and
3882                      "((X op Y) op Y) op X" as "T op T" where T is "X op Y".  */
3884                   if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
3885                       || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
3887                       newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3888                       SUBST (XEXP (setsrc, 0), newdest);
3889                       SUBST (XEXP (setsrc, 1), newdest);
3892                   /* Split "((X op X) op Y) op Y" as "T op T" where
3894                   else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
3896                       rtx tmp = simplify_gen_binary (code, mode, p, r);
3897                       newi2pat = gen_rtx_SET (newdest, tmp);
3898                       SUBST (XEXP (setsrc, 0), newdest);
3899                       SUBST (XEXP (setsrc, 1), newdest);
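              /* A sketch of the effect (registers hypothetical): for
                 "X = Y op Y", e.g.

                   (set (reg:SI 102) (plus:SI (reg:SI 100) (reg:SI 100)))

                 the code above emits newi2pat as
                 (set (reg:SI 101) (reg:SI 100)) and rewrites I3 as
                 (set (reg:SI 102) (plus:SI (reg:SI 101) (reg:SI 101))),
                 where reg 101 stands for NEWDEST.  The
                 "((P op Q) op R) op S" cases likewise reassociate so
                 both halves compute the common subexpression T and I3
                 becomes "T op T".  */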
3907               newi2pat = gen_rtx_SET (newdest, *split);
3908               SUBST (*split, newdest);
3911           i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3913           /* recog_for_combine might have added CLOBBERs to newi2pat.
3914              Make sure NEWPAT does not depend on the clobbered regs.  */
3915           if (GET_CODE (newi2pat) == PARALLEL)
3916             for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3917               if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3919                   rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3920                   if (reg_overlap_mentioned_p (reg, newpat))
3927           /* If the split point was a MULT and we didn't have one before,
3928              don't use one now.  */
3929           if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3930             insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3934   /* Check for a case where we loaded from memory in a narrow mode and
3935      then sign extended it, but we need both registers.  In that case,
3936      we have a PARALLEL with both loads from the same memory location.
3937      We can split this into a load from memory followed by a register-register
3938      copy.  This saves at least one insn, more if register allocation can
3941      We cannot do this if the destination of the first assignment is a
3942      condition code register or cc0.  We eliminate this case by making sure
3943      the SET_DEST and SET_SRC have the same mode.
3945      We cannot do this if the destination of the second assignment is
3946      a register that we have already assumed is zero-extended.  Similarly
3947      for a SUBREG of such a register.  */
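  /* Schematically (modes, registers and the memory location A are
     illustrative only), the PARALLEL

       (parallel [(set (reg:SI 101) (sign_extend:SI (mem:HI A)))
                  (set (reg:HI 100) (mem:HI A))])

     is handled below by keeping the extending load as I2 and turning
     the second SET into a low-part copy from reg 101, so only one
     memory access remains.  */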
3949   else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3950            && GET_CODE (newpat) == PARALLEL
3951            && XVECLEN (newpat, 0) == 2
3952            && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3953            && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3954            && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3955                == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3956            && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3957            && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3958                            XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3959            && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
3960            && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3961            && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3962            && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3964                   && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3965                   && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3966                   && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3967                   && (reg_stat[REGNO (temp_expr)].nonzero_bits
3968                       != GET_MODE_MASK (word_mode))))
3969            && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3970                  && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3972                      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3973                      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3974                      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3975                      && (reg_stat[REGNO (temp_expr)].nonzero_bits
3976                          != GET_MODE_MASK (word_mode)))))
3977            && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3978                                          SET_SRC (XVECEXP (newpat, 0, 1)))
3979            && ! find_reg_note (i3, REG_UNUSED,
3980                                SET_DEST (XVECEXP (newpat, 0, 0))))
3984       newi2pat = XVECEXP (newpat, 0, 0);
3985       ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3986       newpat = XVECEXP (newpat, 0, 1);
3987       SUBST (SET_SRC (newpat),
3988              gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3989       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3991       if (i2_code_number >= 0)
3992         insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3994       if (insn_code_number >= 0)
3998   /* Similarly, check for a case where we have a PARALLEL of two independent
3999      SETs but we started with three insns.  In this case, we can do the sets
4000      as two separate insns.  This case occurs when some SET allows two
4001      other insns to combine, but the destination of that SET is still live.
4003      Also do this if we started with two insns and (at least) one of the
4004      resulting sets is a noop; this noop will be deleted later.  */
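  /* For example (illustrative pseudo-RTL): if combining produced

       (parallel [(set (reg 101) (plus (reg 100) (const_int 4)))
                  (set (reg 103) (mult (reg 102) (reg 102)))])

     and neither SET reads the other's destination, the code below
     simply re-issues one SET as I2 and the other as I3, two
     separately recognizable insns.  */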
4006   else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4007            && GET_CODE (newpat) == PARALLEL
4008            && XVECLEN (newpat, 0) == 2
4009            && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4010            && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4011            && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
4012                   || set_noop_p (XVECEXP (newpat, 0, 1)))
4013            && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4014            && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4015            && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4016            && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4017            && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4018                                   XVECEXP (newpat, 0, 0))
4019            && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4020                                   XVECEXP (newpat, 0, 1))
4021            && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4022                  && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4024       rtx set0 = XVECEXP (newpat, 0, 0);
4025       rtx set1 = XVECEXP (newpat, 0, 1);
4027       /* Normally, it doesn't matter which of the two is done first,
4028          but the one that references cc0 can't be the second, and
4029          one which uses any regs/memory set in between i2 and i3 can't
4030          be first.  The PARALLEL might also have been pre-existing in i3,
4031          so we need to make sure that we won't wrongly hoist a SET to i2
4032          that would conflict with a death note present in there.  */
4033       if (!modified_between_p (SET_SRC (set1), i2, i3)
4034           && !(REG_P (SET_DEST (set1))
4035                && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4036           && !(GET_CODE (SET_DEST (set1)) == SUBREG
4037                && find_reg_note (i2, REG_DEAD,
4038                                  SUBREG_REG (SET_DEST (set1))))
4039           && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4040           /* If I3 is a jump, ensure that set0 is a jump so that
4041              we do not create invalid RTL.  */
4042           && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4048       else if (!modified_between_p (SET_SRC (set0), i2, i3)
4049                && !(REG_P (SET_DEST (set0))
4050                     && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4051                && !(GET_CODE (SET_DEST (set0)) == SUBREG
4052                     && find_reg_note (i2, REG_DEAD,
4053                                       SUBREG_REG (SET_DEST (set0))))
4054                && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4055                /* If I3 is a jump, ensure that set1 is a jump so that
4056                   we do not create invalid RTL.  */
4057                && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4069       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4071       if (i2_code_number >= 0)
4073           /* recog_for_combine might have added CLOBBERs to newi2pat.
4074              Make sure NEWPAT does not depend on the clobbered regs.  */
4075           if (GET_CODE (newi2pat) == PARALLEL)
4077               for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4078                 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4080                     rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4081                     if (reg_overlap_mentioned_p (reg, newpat))
4089           insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4093   /* If it still isn't recognized, fail and change things back the way they
4095   if ((insn_code_number < 0
4096        /* Is the result a reasonable ASM_OPERANDS?  */
4097        && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4103   /* If we had to change another insn, make sure it is valid also.  */
4104   if (undobuf.other_insn)
4106       CLEAR_HARD_REG_SET (newpat_used_regs);
4108       other_pat = PATTERN (undobuf.other_insn);
4109       other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4112       if (other_code_number < 0 && ! check_asm_operands (other_pat))
4119   /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4120      they are adjacent to each other or not.  */
4123       rtx_insn *p = prev_nonnote_insn (i3);
4124       if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4125           && sets_cc0_p (newi2pat))
4132   /* Only allow this combination if insn_cost reports that the
4133      replacement instructions are cheaper than the originals.  */
4134   if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4140   if (MAY_HAVE_DEBUG_BIND_INSNS)
4144       for (undo = undobuf.undos; undo; undo = undo->next)
4145         if (undo->kind == UNDO_MODE)
4147             rtx reg = *undo->where.r;
4148             machine_mode new_mode = GET_MODE (reg);
4149             machine_mode old_mode = undo->old_contents.m;
4151             /* Temporarily revert mode back.  */
4152             adjust_reg_mode (reg, old_mode);
4154             if (reg == i2dest && i2scratch)
4156                 /* If we used i2dest as a scratch register with a
4157                    different mode, substitute it for the original
4158                    i2src while its original mode is temporarily
4159                    restored, and then clear i2scratch so that we don't
4160                    do it again later.  */
4161                 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4164                 /* Put back the new mode.  */
4165                 adjust_reg_mode (reg, new_mode);
4169                 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4170                 rtx_insn *first, *last;
4175                     last = last_combined_insn;
4180                     last = undobuf.other_insn;
4182                     if (DF_INSN_LUID (last)
4183                         < DF_INSN_LUID (last_combined_insn))
4184                       last = last_combined_insn;
4187                 /* We're dealing with a reg that changed mode but not
4188                    meaning, so we want to turn it into a subreg for
4189                    the new mode.  However, because of REG sharing and
4190                    because its mode had already changed, we have to do
4191                    it in two steps.  First, replace any debug uses of
4192                    reg, with its original mode temporarily restored,
4193                    with this copy we have created; then, replace the
4194                    copy with the SUBREG of the original shared reg,
4195                    once again changed to the new mode.  */
4196                 propagate_for_debug (first, last, reg, tempreg,
4198                 adjust_reg_mode (reg, new_mode);
4199                 propagate_for_debug (first, last, tempreg,
4200                                      lowpart_subreg (old_mode, reg, new_mode),
4206   /* If we will be able to accept this, we have made a
4207      change to the destination of I3.  This requires us to
4208      do a few adjustments.  */
4210   if (changed_i3_dest)
4212       PATTERN (i3) = newpat;
4213       adjust_for_new_dest (i3);
4216   /* We now know that we can do this combination.  Merge the insns and
4217      update the status of registers and LOG_LINKS.  */
4219   if (undobuf.other_insn)
4223       PATTERN (undobuf.other_insn) = other_pat;
4225       /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4226          ensure that they are still valid.  Then add any non-duplicate
4227          notes added by recog_for_combine.  */
4228       for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4230           next = XEXP (note, 1);
4232           if ((REG_NOTE_KIND (note) == REG_DEAD
4233                && !reg_referenced_p (XEXP (note, 0),
4234                                      PATTERN (undobuf.other_insn)))
4235               || (REG_NOTE_KIND (note) == REG_UNUSED
4236                   && !reg_set_p (XEXP (note, 0),
4237                                  PATTERN (undobuf.other_insn)))
4238               /* Simply drop equal notes since they may be no longer valid
4239                  for other_insn.  It may be possible to record that CC
4240                  register is changed and only discard those notes, but
4241                  in practice it's unnecessary complication and doesn't
4242                  give any meaningful improvement.
4245               || REG_NOTE_KIND (note) == REG_EQUAL
4246               || REG_NOTE_KIND (note) == REG_EQUIV)
4247             remove_note (undobuf.other_insn, note);
4250       distribute_notes (new_other_notes, undobuf.other_insn,
4251                         undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4258       struct insn_link *link;
4261       /* I3 now uses what used to be its destination and which is now
4262          I2's destination.  This requires us to do a few adjustments.  */
4263       PATTERN (i3) = newpat;
4264       adjust_for_new_dest (i3);
4266       /* We need a LOG_LINK from I3 to I2.  But we used to have one,
4269          However, some later insn might be using I2's dest and have
4270          a LOG_LINK pointing at I3.  We must remove this link.
4271          The simplest way to remove the link is to point it at I1,
4272          which we know will be a NOTE.  */
4274       /* newi2pat is usually a SET here; however, recog_for_combine might
4275          have added some clobbers.  */
4276       if (GET_CODE (newi2pat) == PARALLEL)
4277         ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4279         ni2dest = SET_DEST (newi2pat);
4281       for (insn = NEXT_INSN (i3);
4282            insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4283                     || insn != BB_HEAD (this_basic_block->next_bb));
4284            insn = NEXT_INSN (insn))
4286           if (NONDEBUG_INSN_P (insn)
4287               && reg_referenced_p (ni2dest, PATTERN (insn)))
4289               FOR_EACH_LOG_LINK (link, insn)
4290                 if (link->insn == i3)
4299     rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4300     struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4303     /* Compute which registers we expect to eliminate.  newi2pat may be setting
4304        either i3dest or i2dest, so we must check it.  */
4305     rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4306                    || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4309     /* For i1, we need to compute both local elimination and global
4310        elimination information with respect to newi2pat because i1dest
4311        may be the same as i3dest, in which case newi2pat may be setting
4312        i1dest.  Global information is used when distributing REG_DEAD
4313        note for i2 and i3, in which case it does matter if newi2pat sets
4316        Local information is used when distributing REG_DEAD note for i1,
4317        in which case it doesn't matter if newi2pat sets i1dest or not.
4318        See PR62151, if we have four insns combination:
4320            i1: r1 <- i1src (using r0)
4322            i2: r0 <- i2src (using r1)
4323            i3: r3 <- i3src (using r0)
4325        From i1's point of view, r0 is eliminated, no matter if it is set
4326        by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
4327        should be discarded.
4329        Note local information only affects cases in forms like "I1->I2->I3",
4330        "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
4331        "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4333     rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4336     rtx elim_i1 = (local_elim_i1 == 0
4337                    || (newi2pat && reg_set_p (i1dest, newi2pat))
4339     /* Same case as i1.  */
4340     rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4342     rtx elim_i0 = (local_elim_i0 == 0
4343                    || (newi2pat && reg_set_p (i0dest, newi2pat))
4346     /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4348     i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4349     i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4351       i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4353       i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4355     /* Ensure that we do not have something that should not be shared but
4356        occurs multiple times in the new insns.  Check this by first
4357        resetting all the `used' flags and then copying anything that is
4358        shared.  */
4359     reset_used_flags (i3notes);
4360     reset_used_flags (i2notes);
4361     reset_used_flags (i1notes);
4362     reset_used_flags (i0notes);
4363     reset_used_flags (newpat);
4364     reset_used_flags (newi2pat);
4365     if (undobuf.other_insn)
4366       reset_used_flags (PATTERN (undobuf.other_insn));
4368     i3notes = copy_rtx_if_shared (i3notes);
4369     i2notes = copy_rtx_if_shared (i2notes);
4370     i1notes = copy_rtx_if_shared (i1notes);
4371     i0notes = copy_rtx_if_shared (i0notes);
4372     newpat = copy_rtx_if_shared (newpat);
4373     newi2pat = copy_rtx_if_shared (newi2pat);
4374     if (undobuf.other_insn)
4375       reset_used_flags (PATTERN (undobuf.other_insn));
4377     INSN_CODE (i3) = insn_code_number;
4378     PATTERN (i3) = newpat;
4380     if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4382         for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4383              link = XEXP (link, 1))
4387             /* I2SRC must still be meaningful at this point.  Some
4388                splitting operations can invalidate I2SRC, but those
4389                operations do not apply to calls.  */
4391               XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4395               XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4398               XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4403     if (undobuf.other_insn)
4404       INSN_CODE (undobuf.other_insn) = other_code_number;
4406     /* We had one special case above where I2 had more than one set and
4407        we replaced a destination of one of those sets with the destination
4408        of I3.  In that case, we have to update LOG_LINKS of insns later
4409        in this basic block.  Note that this (expensive) case is rare.
4411        Also, in this case, we must pretend that all REG_NOTEs for I2
4412        actually came from I3, so that REG_UNUSED notes from I2 will be
4413        properly handled.  */
4415     if (i3_subst_into_i2)
4417         for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4418           if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4419                || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4420               && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4421               && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4422               && ! find_reg_note (i2, REG_UNUSED,
4423                                   SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4424             for (temp_insn = NEXT_INSN (i2);
4426                      && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4427                          || BB_HEAD (this_basic_block) != temp_insn);
4428                  temp_insn = NEXT_INSN (temp_insn))
4429               if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4430                 FOR_EACH_LOG_LINK (link, temp_insn)
4431                   if (link->insn == i2)
4437             while (XEXP (link, 1))
4438               link = XEXP (link, 1);
4439             XEXP (link, 1) = i2notes;
4446     LOG_LINKS (i3) = NULL;
4448     LOG_LINKS (i2) = NULL;
4453         if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4454           propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4456         INSN_CODE (i2) = i2_code_number;
4457         PATTERN (i2) = newi2pat;
4461         if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4462           propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4464         SET_INSN_DELETED (i2);
4469         LOG_LINKS (i1) = NULL;
4471         if (MAY_HAVE_DEBUG_BIND_INSNS)
4472           propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4474         SET_INSN_DELETED (i1);
4479         LOG_LINKS (i0) = NULL;
4481         if (MAY_HAVE_DEBUG_BIND_INSNS)
4482           propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4484         SET_INSN_DELETED (i0);
4487     /* Get death notes for everything that is now used in either I3 or
4488        I2 and used to die in a previous insn.  If we built two new
4489        patterns, move from I1 to I2 then I2 to I3 so that we get the
4490        proper movement on registers that I2 modifies.  */
4493       from_luid = DF_INSN_LUID (i0);
4495       from_luid = DF_INSN_LUID (i1);
4497       from_luid = DF_INSN_LUID (i2);
4499       move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4500     move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4502     /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
4504       distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4505                         elim_i2, elim_i1, elim_i0);
4507       distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4508                         elim_i2, elim_i1, elim_i0);
4510       distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4511                         elim_i2, local_elim_i1, local_elim_i0);
4513       distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4514                         elim_i2, elim_i1, local_elim_i0);
4516       distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4517                         elim_i2, elim_i1, elim_i0);
4519     /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
4520        know these are REG_UNUSED and want them to go to the desired insn,
4521        so we always pass it as i3.  */
4523     if (newi2pat && new_i2_notes)
4524       distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4528       distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4531     /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
4532        put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
4533        I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
4534        in that case, it might delete I2.  Similarly for I2 and I1.
4535        Show an additional death due to the REG_DEAD note we make here.  If
4536        we discard it in distribute_notes, we will decrement it again.  */
4540         rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4541         if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4542           distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4545           distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4546                             elim_i2, elim_i1, elim_i0);
4549     if (i2dest_in_i2src)
4551         rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4552         if (newi2pat && reg_set_p (i2dest, newi2pat))
4553           distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4554                             NULL_RTX, NULL_RTX);
4556           distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4557                             NULL_RTX, NULL_RTX, NULL_RTX);
4560     if (i1dest_in_i1src)
4562         rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4563         if (newi2pat && reg_set_p (i1dest, newi2pat))
4564           distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4565                             NULL_RTX, NULL_RTX);
4567           distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4568                             NULL_RTX, NULL_RTX, NULL_RTX);
4571     if (i0dest_in_i0src)
4573         rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4574         if (newi2pat && reg_set_p (i0dest, newi2pat))
4575           distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4576                             NULL_RTX, NULL_RTX);
4578           distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4579                             NULL_RTX, NULL_RTX, NULL_RTX);
4582     distribute_links (i3links);
4583     distribute_links (i2links);
4584     distribute_links (i1links);
4585     distribute_links (i0links);
4589         struct insn_link *link;
4590         rtx_insn *i2_insn = 0;
4591         rtx i2_val = 0, set;
4593         /* The insn that used to set this register doesn't exist, and
4594            this life of the register may not exist either.  See if one of
4595            I3's links points to an insn that sets I2DEST.  If it does,
4596            that is now the last known value for I2DEST.  If we don't update
4597            this and I2 set the register to a value that depended on its old
4598            contents, we will get confused.  If this insn is used, things
4599            will be set correctly in combine_instructions.  */
4600         FOR_EACH_LOG_LINK (link, i3)
4601           if ((set = single_set (link->insn)) != 0
4602               && rtx_equal_p (i2dest, SET_DEST (set)))
4603             i2_insn = link->insn, i2_val = SET_SRC (set);
4605         record_value_for_reg (i2dest, i2_insn, i2_val);
4607         /* If the reg formerly set in I2 died only once and that was in I3,
4608            zero its use count so it won't make `reload' do any work.  */
4610             && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4611             && ! i2dest_in_i2src
4612             && REGNO (i2dest) < reg_n_sets_max)
4613           INC_REG_N_SETS (REGNO (i2dest), -1);
4616     if (i1 && REG_P (i1dest))
4618         struct insn_link *link;
4619         rtx_insn *i1_insn = 0;
4620         rtx i1_val = 0, set;
4622         FOR_EACH_LOG_LINK (link, i3)
4623           if ((set = single_set (link->insn)) != 0
4624               && rtx_equal_p (i1dest, SET_DEST (set)))
4625             i1_insn = link->insn, i1_val = SET_SRC (set);
4627         record_value_for_reg (i1dest, i1_insn, i1_val);
4630             && ! i1dest_in_i1src
4631             && REGNO (i1dest) < reg_n_sets_max)
4632           INC_REG_N_SETS (REGNO (i1dest), -1);
4635     if (i0 && REG_P (i0dest))
4637         struct insn_link *link;
4638         rtx_insn *i0_insn = 0;
4639         rtx i0_val = 0, set;
4641         FOR_EACH_LOG_LINK (link, i3)
4642           if ((set = single_set (link->insn)) != 0
4643               && rtx_equal_p (i0dest, SET_DEST (set)))
4644             i0_insn = link->insn, i0_val = SET_SRC (set);
4646         record_value_for_reg (i0dest, i0_insn, i0_val);
4649             && ! i0dest_in_i0src
4650             && REGNO (i0dest) < reg_n_sets_max)
4651           INC_REG_N_SETS (REGNO (i0dest), -1);
4654     /* Update reg_stat[].nonzero_bits et al for any changes that may have
4655        been made to this insn.  The order is important, because newi2pat
4656        can affect nonzero_bits of newpat.  */
4658       note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4659     note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4662   if (undobuf.other_insn != NULL_RTX)
4666           fprintf (dump_file, "modifying other_insn ");
4667           dump_insn_slim (dump_file, undobuf.other_insn);
4669       df_insn_rescan (undobuf.other_insn);
4672   if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4676           fprintf (dump_file, "modifying insn i0 ");
4677           dump_insn_slim (dump_file, i0);
4679       df_insn_rescan (i0);
4682   if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4686           fprintf (dump_file, "modifying insn i1 ");
4687           dump_insn_slim (dump_file, i1);
4689       df_insn_rescan (i1);
4692   if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4696           fprintf (dump_file, "modifying insn i2 ");
4697           dump_insn_slim (dump_file, i2);
4699       df_insn_rescan (i2);
4702   if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4706           fprintf (dump_file, "modifying insn i3 ");
4707           dump_insn_slim (dump_file, i3);
4709       df_insn_rescan (i3);
4712   /* Set new_direct_jump_p if a new return or simple jump instruction
4713      has been created.  Adjust the CFG accordingly.  */
4714   if (returnjump_p (i3) || any_uncondjump_p (i3))
4716       *new_direct_jump_p = 1;
4717       mark_jump_label (PATTERN (i3), i3, 0);
4718       update_cfg_for_uncondjump (i3);
4721   if (undobuf.other_insn != NULL_RTX
4722       && (returnjump_p (undobuf.other_insn)
4723           || any_uncondjump_p (undobuf.other_insn)))
4725       *new_direct_jump_p = 1;
4726       update_cfg_for_uncondjump (undobuf.other_insn);
4729   if (GET_CODE (PATTERN (i3)) == TRAP_IF
4730       && XEXP (PATTERN (i3), 0) == const1_rtx)
4732       basic_block bb = BLOCK_FOR_INSN (i3);
4734       remove_edge (split_block (bb, i3));
4735       emit_barrier_after_bb (bb);
4736       *new_direct_jump_p = 1;
4739   if (undobuf.other_insn
4740       && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4741       && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4743       basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4745       remove_edge (split_block (bb, undobuf.other_insn));
4746       emit_barrier_after_bb (bb);
4747       *new_direct_jump_p = 1;
4750   /* A noop might also need cleaning up of CFG, if it comes from the
4751      simplification of a jump.  */
4753       && GET_CODE (newpat) == SET
4754       && SET_SRC (newpat) == pc_rtx
4755       && SET_DEST (newpat) == pc_rtx)
4757       *new_direct_jump_p = 1;
4758       update_cfg_for_uncondjump (i3);
4761   if (undobuf.other_insn != NULL_RTX
4762       && JUMP_P (undobuf.other_insn)
4763       && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4764       && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4765       && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4767       *new_direct_jump_p = 1;
4768       update_cfg_for_uncondjump (undobuf.other_insn);
4771   combine_successes++;
4774   rtx_insn *ret = newi2pat ? i2 : i3;
4775   if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4776     ret = added_links_insn;
4777   if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4778     ret = added_notes_insn;
4783 /* Get a marker for undoing to the current state.  */
4786 get_undo_marker (void)
4788   return undobuf.undos;
4791 /* Undo the modifications up to the marker.  */
4794 undo_to_marker (void *marker)
4796   struct undo *undo, *next;
4798   for (undo = undobuf.undos; undo != marker; undo = next)
4806         *undo->where.r = undo->old_contents.r;
4809         *undo->where.i = undo->old_contents.i;
4812         adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4815         *undo->where.l = undo->old_contents.l;
4821       undo->next = undobuf.frees;
4822       undobuf.frees = undo;
4825   undobuf.undos = (struct undo *) marker;
4828 /* Undo all the modifications recorded in undobuf.  */
4836 /* We've committed to accepting the changes we made.  Move all
4837    of the undos to the free list.  */
4842   struct undo *undo, *next;
4844   for (undo = undobuf.undos; undo; undo = next)
4847       undo->next = undobuf.frees;
4848       undobuf.frees = undo;
4853 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4854    where we have an arithmetic expression and return that point.  LOC will
4857    try_combine will call this function to see if an insn can be split into
4861 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4864   enum rtx_code code = GET_CODE (x);
4866   unsigned HOST_WIDE_INT len = 0;
4867   HOST_WIDE_INT pos = 0;
4869   rtx inner = NULL_RTX;
4870   scalar_int_mode mode, inner_mode;
4872   /* First special-case some codes.  */
4876 #ifdef INSN_SCHEDULING
4877       /* If we are making a paradoxical SUBREG invalid, it becomes a split
4878          point.  */
4879       if (MEM_P (SUBREG_REG (x)))
4882       return find_split_point (&SUBREG_REG (x), insn, false);
4885       /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4886          using LO_SUM and HIGH.  */
4887       if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4888                           || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4890           machine_mode address_mode = get_address_mode (x);
4893                  gen_rtx_LO_SUM (address_mode,
4894                                  gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4896           return &XEXP (XEXP (x, 0), 0);
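      /* Illustration: on a target with HAVE_lo_sum, an address such as
         (mem (symbol_ref "x")) is rewritten so the address reads

           (lo_sum (high (symbol_ref "x")) (symbol_ref "x"))

         and the HIGH subexpression is returned as the split point,
         matching the usual RISC "load the upper bits, then add the low
         bits" idiom.  (The symbol name is illustrative only.)  */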
4899       /* If we have a PLUS whose second operand is a constant and the
4900          address is not valid, perhaps we can split it up using
4901          the machine-specific way to split large constants.  We use
4902          the first pseudo-reg (one of the virtual regs) as a placeholder;
4903          it will not remain in the result.  */
4904       if (GET_CODE (XEXP (x, 0)) == PLUS
4905           && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4906           && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4907                                             MEM_ADDR_SPACE (x)))
4909           rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4910           rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4913           /* This should have produced two insns, each of which sets our
4914              placeholder.  If the source of the second is a valid address,
4915              we can put both sources together and make a split point
4919               && NEXT_INSN (seq) != NULL_RTX
4920               && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4921               && NONJUMP_INSN_P (seq)
4922               && GET_CODE (PATTERN (seq)) == SET
4923               && SET_DEST (PATTERN (seq)) == reg
4924               && ! reg_mentioned_p (reg,
4925                                     SET_SRC (PATTERN (seq)))
4926               && NONJUMP_INSN_P (NEXT_INSN (seq))
4927               && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4928               && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4929               && memory_address_addr_space_p
4930                    (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4931                     MEM_ADDR_SPACE (x)))
4933               rtx src1 = SET_SRC (PATTERN (seq));
4934               rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4936               /* Replace the placeholder in SRC2 with SRC1.  If we can
4937                  find where in SRC2 it was placed, that can become our
4938                  split point and we can replace this address with SRC2.
4939                  Just try two obvious places.  */
4941               src2 = replace_rtx (src2, reg, src1);
4943               if (XEXP (src2, 0) == src1)
4944                 split = &XEXP (src2, 0);
4945               else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4946                        && XEXP (XEXP (src2, 0), 0) == src1)
4947                 split = &XEXP (XEXP (src2, 0), 0);
4951                   SUBST (XEXP (x, 0), src2);
4956           /* If that didn't work, perhaps the first operand is complex and
4957              needs to be computed separately, so make a split point there.
4958              This will occur on machines that just support REG + CONST
4959              and have a constant moved through some previous computation.  */
4961           else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4962                    && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4963                          && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4964             return &XEXP (XEXP (x, 0), 0);
4967       /* If we have a PLUS whose first operand is complex, try computing it
4968          separately by making a split there.  */
4969       if (GET_CODE (XEXP (x, 0)) == PLUS
4970           && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4972           && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4973           && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4974                 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4975         return &XEXP (XEXP (x, 0), 0);
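      /* Sketch of the intent (values and splitter output purely
         illustrative): for "reg = (plus (reg 100) (const_int 70000))"
         a target splitter might emit

           (set (reg P) (const_int 65536))
           (set (reg P) (plus (reg 100) (plus (reg P) (const_int 4464))))

         Substituting the first source for P in the second gives one
         combined address expression; the spot where SRC1 landed in it
         becomes the split point, and the MEM's address is replaced by
         the combined SRC2.  */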
4979       /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4980          ZERO_EXTRACT, the most likely reason why this doesn't match is that
4981          we need to put the operand into a register.  So split at that point.  */
4984       if (SET_DEST (x) == cc0_rtx
4985           && GET_CODE (SET_SRC (x)) != COMPARE
4986           && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4987           && !OBJECT_P (SET_SRC (x))
4988           && ! (GET_CODE (SET_SRC (x)) == SUBREG
4989                 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4990         return &SET_SRC (x);
4992       /* See if we can split SET_SRC as it stands.  */
4993       split = find_split_point (&SET_SRC (x), insn, true);
4994       if (split && split != &SET_SRC (x))
4997       /* See if we can split SET_DEST as it stands.  */
4998       split = find_split_point (&SET_DEST (x), insn, false);
4999       if (split && split != &SET_DEST (x))
5002       /* See if this is a bitfield assignment with everything constant.  If
5003          so, this is an IOR of an AND, so split it into that.  */
5004       if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5005           && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5007           && HWI_COMPUTABLE_MODE_P (inner_mode)
5008           && CONST_INT_P (XEXP (SET_DEST (x), 1))
5009           && CONST_INT_P (XEXP (SET_DEST (x), 2))
5010           && CONST_INT_P (SET_SRC (x))
5011           && ((INTVAL (XEXP (SET_DEST (x), 1))
5012                + INTVAL (XEXP (SET_DEST (x), 2)))
5013               <= GET_MODE_PRECISION (inner_mode))
5014           && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5016           HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5017           unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5018           unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
5019           rtx dest = XEXP (SET_DEST (x), 0);
5020           unsigned HOST_WIDE_INT mask
5021             = (HOST_WIDE_INT_1U << len) - 1;
5024           if (BITS_BIG_ENDIAN)
5025             pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5027             or_mask = gen_int_mode (src << pos, inner_mode);
5030                    simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5033               rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5035                      simplify_gen_binary (IOR, inner_mode,
5036                                           simplify_gen_binary (AND, inner_mode,
5041           SUBST (SET_DEST (x), dest);
5043           split = find_split_point (&SET_SRC (x), insn, true);
5044           if (split && split != &SET_SRC (x))
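          /* Worked example (positions and values illustrative): storing
             the constant 5 into a 3-bit field at bit 4 of (reg:SI 100),

               (set (zero_extract:SI (reg:SI 100) (const_int 3)
                                     (const_int 4))
                    (const_int 5))

             becomes the full-register form

               reg100 = (reg100 & ~(7 << 4)) | (5 << 4)

             using mask = (1 << len) - 1 = 7 as computed above; the
             resulting IOR/AND chain is then searched for a split
             point.  */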
5048       /* Otherwise, see if this is an operation that we can split into two.
5049          If so, try to split that.  */
5050       code = GET_CODE (SET_SRC (x));
5055           /* If we are AND'ing with a large constant that is only a single
5056              bit and the result is only being used in a context where we
5057              need to know if it is zero or nonzero, replace it with a bit
5058              extraction.  This will avoid the large constant, which might
5059              have taken more than one insn to make.  If the constant were
5060              not a valid argument to the AND but took only one insn to make,
5061              this is no worse, but if it took more than one insn, it will
5064           if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5065               && REG_P (XEXP (SET_SRC (x), 0))
5066               && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5067               && REG_P (SET_DEST (x))
5068               && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5069               && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5070               && XEXP (*split, 0) == SET_DEST (x)
5071               && XEXP (*split, 1) == const0_rtx)
5073               rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5074                                                 XEXP (SET_SRC (x), 0),
5075                                                 pos, NULL_RTX, 1, 1, 0, 0);
5076               if (extraction != 0)
5078                   SUBST (SET_SRC (x), extraction);
5079                   return find_split_point (loc, insn, false);
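          /* Example (values illustrative): for

               (set (reg 101) (and:SI (reg 100) (const_int 4096)))

             whose only use is (ne (reg 101) (const_int 0)),
             exact_log2 (4096) == 12 >= 7, so the AND is replaced by a
             one-bit ZERO_EXTRACT of bit 12, avoiding any need to
             materialize the 4096 constant in a register.  */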
5085           /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and, if only one bit
5086              of X is known to be on, it can be converted into a NEG of a shift.  */
5087           if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5088               && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5089               && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5090                                                    GET_MODE (XEXP (SET_SRC (x),
5093               machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5094               rtx pos_rtx = gen_int_shift_amount (mode, pos);
5097                        gen_rtx_LSHIFTRT (mode,
5098                                          XEXP (SET_SRC (x), 0),
5101               split = find_split_point (&SET_SRC (x), insn, true);
5102               if (split && split != &SET_SRC (x))
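          /* Example (bit position illustrative): when only bit 7 of
             reg 100 can be nonzero, "(ne (reg 100) (const_int 0))"
             under STORE_FLAG_VALUE == -1 is rewritten as

               (neg (lshiftrt (reg 100) (const_int 7)))

             the shift isolates the interesting bit and the negation
             turns the resulting 1 into the required -1.  */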
5108           inner = XEXP (SET_SRC (x), 0);
5110           /* We can't optimize if either mode is a partial integer
5111              mode as we don't know how many bits are significant
5112              in those modes.  */
5113           if (!is_int_mode (GET_MODE (inner), &inner_mode)
5114               || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5118           len = GET_MODE_PRECISION (inner_mode);
5124           if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5126               && CONST_INT_P (XEXP (SET_SRC (x), 1))
5127               && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5129               inner = XEXP (SET_SRC (x), 0);
5130               len = INTVAL (XEXP (SET_SRC (x), 1));
5131               pos = INTVAL (XEXP (SET_SRC (x), 2));
5133               if (BITS_BIG_ENDIAN)
5134                 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5135               unsignedp = (code == ZERO_EXTRACT);
5144               && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
5145               && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5147               /* For unsigned, we have a choice of a shift followed by an
5148                  AND or two shifts.  Use two shifts for field sizes where the
5149                  constant might be too large.  We assume here that we can
5150                  always at least get 8-bit constants in an AND insn, which is
5151                  true for every current RISC.  */
5153               if (unsignedp && len <= 8)
5155                   unsigned HOST_WIDE_INT mask
5156                     = (HOST_WIDE_INT_1U << len) - 1;
5157                   rtx pos_rtx = gen_int_shift_amount (mode, pos);
5161                        (mode, gen_lowpart (mode, inner), pos_rtx),
5162                        gen_int_mode (mask, mode)));
5164                   split = find_split_point (&SET_SRC (x), insn, true);
5165                   if (split && split != &SET_SRC (x))
5170                   int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5171                   int right_bits = GET_MODE_PRECISION (mode) - len;
5174                        (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5175                         gen_rtx_ASHIFT (mode,
5176                                         gen_lowpart (mode, inner),
5177                                         gen_int_shift_amount (mode, left_bits)),
5178                         gen_int_shift_amount (mode, right_bits)));
5180                   split = find_split_point (&SET_SRC (x), insn, true);
5181                   if (split && split != &SET_SRC (x))
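              /* Illustration (field parameters illustrative): extracting
                 an unsigned 8-bit field at bit 8 of (reg:SI 100) becomes

                   (and:SI (lshiftrt:SI (reg:SI 100) (const_int 8))
                           (const_int 255))

                 while a signed field, or one wider than 8 bits, uses
                 the two-shift form

                   (ashiftrt:SI (ashift:SI (reg:SI 100) (const_int 16))
                                (const_int 24))

                 which needs no large AND constant.  */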
5186       /* See if this is a simple operation with a constant as the second
5187          operand.  It might be that this constant is out of range and hence
5188          could be used as a split point.  */
5189       if (BINARY_P (SET_SRC (x))
5190           && CONSTANT_P (XEXP (SET_SRC (x), 1))
5191           && (OBJECT_P (XEXP (SET_SRC (x), 0))
5192               || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5193                   && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5194         return &XEXP (SET_SRC (x), 1);
5196       /* Finally, see if this is a simple operation with its first operand
5197          not in a register.  The operation might require this operand in a
5198          register, so return it as a split point.  We can always do this
5199          because if the first operand were another operation, we would have
5200          already found it as a split point.  */
5201       if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5202           && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5203         return &XEXP (SET_SRC (x), 0);
5209       /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5210          it is better to write this as (not (ior A B)) so we can split it.
5211          Similarly for IOR.  */
5212       if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5215                  gen_rtx_NOT (GET_MODE (x),
5216                               gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5218                                               XEXP (XEXP (x, 0), 0),
5219                                               XEXP (XEXP (x, 1), 0))));
5220           return find_split_point (loc, insn, set_src);
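      /* Example: lacking a NOR pattern,
         (and (not (reg 100)) (not (reg 101))) is rewritten by
         De Morgan's law as

           (not (ior (reg 100) (reg 101)))

         which exposes the inner IOR as a splittable subexpression; the
         same holds with AND and IOR exchanged.  (Registers are
         illustrative.)  */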
5223       /* Many RISC machines have a large set of logical insns.  If the
5224          second operand is a NOT, put it first so we will try to split the
5225          other operand first.  */
5226       if (GET_CODE (XEXP (x, 1)) == NOT)
5228           rtx tem = XEXP (x, 0);
5229           SUBST (XEXP (x, 0), XEXP (x, 1));
5230           SUBST (XEXP (x, 1), tem);
5236       /* Canonicalization can produce (minus A (mult B C)), where C is a
5237          constant.  It may be better to try splitting (plus (mult B -C) A)
5238          instead if this isn't a multiply by a power of two.  */
5239       if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5240           && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5241           && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5243           machine_mode mode = GET_MODE (x);
5244           unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5245           HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5246           SUBST (*loc, gen_rtx_PLUS (mode,
5248                                      XEXP (XEXP (x, 1), 0),
5249                                      gen_int_mode (other_int,
5252           return find_split_point (loc, insn, set_src);
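      /* Example (constant illustrative): "a - b*3" arrives
         canonicalized as (minus A (mult B (const_int 3))).  Since 3 is
         not a power of two, it is re-expressed as

           (plus (mult B (const_int -3)) A)

         which has the shape of a multiply-accumulate and is tried as a
         split point on targets that provide one.  */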
5255       /* Split at a multiply-accumulate instruction.  However if this is
5256          the SET_SRC, we likely do not have such an instruction and it's
5257          worthless to try this split.  */
5259           && (GET_CODE (XEXP (x, 0)) == MULT
5260               || (GET_CODE (XEXP (x, 0)) == ASHIFT
5261                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5268   /* Otherwise, select our actions depending on our rtx class.  */
5269   switch (GET_RTX_CLASS (code))
5271     case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
5273       split = find_split_point (&XEXP (x, 2), insn, false);
5278     case RTX_COMM_ARITH:
5280     case RTX_COMM_COMPARE:
5281       split = find_split_point (&XEXP (x, 1), insn, false);
5286       /* Some machines have (and (shift ...) ...) insns.  If X is not
5287          an AND, but XEXP (X, 0) is, use it as our split point.  */
5288       if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5289         return &XEXP (x, 0);
5291       split = find_split_point (&XEXP (x, 0), insn, false);
5297   /* Otherwise, we don't have a split point.  */
5302 /* Throughout X, replace FROM with TO, and return the result.
5303 The result is TO if X is FROM;
5304 otherwise the result is X, but its contents may have been modified.
5305 If they were modified, a record was made in undobuf so that
5306 undo_all will (among other things) return X to its original state.
5308 If the number of changes necessary is too much to record to undo,
5309 the excess changes are not made, so the result is invalid.
5310 The changes already made can still be undone.
5311 undobuf.num_undo is incremented for such changes, so by testing that
5312 the caller can tell whether the result is valid.
5314 `n_occurrences' is incremented each time FROM is replaced.
5316 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5318 IN_COND is nonzero if we are at the top level of a condition.
5320 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5321 by copying if `n_occurrences' is nonzero. */
5324 subst (rtx x
, rtx from
, rtx to
, int in_dest
, int in_cond
, int unique_copy
)
5326 enum rtx_code code
= GET_CODE (x
);
5327 machine_mode op0_mode
= VOIDmode
;
5332 /* Two expressions are equal if they are identical copies of a shared
5333 RTX or if they are both registers with the same register number
5336 #define COMBINE_RTX_EQUAL_P(X,Y) \
5338 || (REG_P (X) && REG_P (Y) \
5339 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5341 /* Do not substitute into clobbers of regs -- this will never result in
5343 if (GET_CODE (x
) == CLOBBER
&& REG_P (XEXP (x
, 0)))
5346 if (! in_dest
&& COMBINE_RTX_EQUAL_P (x
, from
))
5349 return (unique_copy
&& n_occurrences
> 1 ? copy_rtx (to
) : to
);
5352 /* If X and FROM are the same register but different modes, they
5353 will not have been seen as equal above. However, the log links code
5354 will make a LOG_LINKS entry for that case. If we do nothing, we
5355 will try to rerecognize our original insn and, when it succeeds,
5356 we will delete the feeding insn, which is incorrect.
5358 So force this insn not to match in this (rare) case. */
5359 if (! in_dest
&& code
== REG
&& REG_P (from
)
5360 && reg_overlap_mentioned_p (x
, from
))
5361 return gen_rtx_CLOBBER (GET_MODE (x
), const0_rtx
);
5363 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5364 of which may contain things that can be combined. */
5365 if (code
!= MEM
&& code
!= LO_SUM
&& OBJECT_P (x
))
5368 /* It is possible to have a subexpression appear twice in the insn.
5369 Suppose that FROM is a register that appears within TO.
5370 Then, after that subexpression has been scanned once by `subst',
5371 the second time it is scanned, TO may be found. If we were
5372 to scan TO here, we would find FROM within it and create a
5373 self-referent rtl structure which is completely wrong. */
5374 if (COMBINE_RTX_EQUAL_P (x
, to
))
5377 /* Parallel asm_operands need special attention because all of the
5378 inputs are shared across the arms. Furthermore, unsharing the
5379 rtl results in recognition failures. Failure to handle this case
5380 specially can result in circular rtl.
5382 Solve this by doing a normal pass across the first entry of the
5383 parallel, and only processing the SET_DESTs of the subsequent
5386 if (code
== PARALLEL
5387 && GET_CODE (XVECEXP (x
, 0, 0)) == SET
5388 && GET_CODE (SET_SRC (XVECEXP (x
, 0, 0))) == ASM_OPERANDS
)
5390 new_rtx
= subst (XVECEXP (x
, 0, 0), from
, to
, 0, 0, unique_copy
);
5392 /* If this substitution failed, this whole thing fails. */
5393 if (GET_CODE (new_rtx
) == CLOBBER
5394 && XEXP (new_rtx
, 0) == const0_rtx
)
5397 SUBST (XVECEXP (x
, 0, 0), new_rtx
);
5399 for (i
= XVECLEN (x
, 0) - 1; i
>= 1; i
--)
5401 rtx dest
= SET_DEST (XVECEXP (x
, 0, i
));
5404 && GET_CODE (dest
) != CC0
5405 && GET_CODE (dest
) != PC
)
5407 new_rtx
= subst (dest
, from
, to
, 0, 0, unique_copy
);
5409 /* If this substitution failed, this whole thing fails. */
5410 if (GET_CODE (new_rtx
) == CLOBBER
5411 && XEXP (new_rtx
, 0) == const0_rtx
)
5414 SUBST (SET_DEST (XVECEXP (x
, 0, i
)), new_rtx
);
5420 len
= GET_RTX_LENGTH (code
);
5421 fmt
= GET_RTX_FORMAT (code
);
5423 /* We don't need to process a SET_DEST that is a register, CC0,
5424 or PC, so set up to skip this common case. All other cases
5425 where we want to suppress replacing something inside a
5426 SET_SRC are handled via the IN_DEST operand. */
5428 && (REG_P (SET_DEST (x
))
5429 || GET_CODE (SET_DEST (x
)) == CC0
5430 || GET_CODE (SET_DEST (x
)) == PC
))
5433 /* Trying to simplify the operands of a widening MULT is not likely
5434 to create RTL matching a machine insn. */
5436 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
5437 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
)
5438 && (GET_CODE (XEXP (x
, 1)) == ZERO_EXTEND
5439 || GET_CODE (XEXP (x
, 1)) == SIGN_EXTEND
)
5440 && REG_P (XEXP (XEXP (x
, 0), 0))
5441 && REG_P (XEXP (XEXP (x
, 1), 0))
5446 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
    op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && !targetm.modes_tieable_p (GET_MODE (to),
						   GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && (targetm.modes_tieable_p
				(GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
		      && (!HAVE_cc0
			  || (! (code == SET
				 && i == 1
				 && XEXP (x, 0) == cc0_rtx))))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG
					|| code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new_rtx,
						GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }

  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
	return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */

static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
   of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  scalar_int_mode int_mode;
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  maybe_swap_commutative_operands (x);

  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }

  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					 (cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					 (cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }

  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }

  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
	{
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);
	  rtx inner;

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	    std::swap (inner_op0, inner_op1);
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	    {
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
						 inner_op1);
	    }

	  if (inner)
	    return simplify_gen_binary (code, mode, other, inner);
	}
    }
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;

    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
	  /* Don't call gen_lowpart if the inner mode
	     is VOIDmode and we cannot simplify it, as SUBREG without
	     inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;
      {
	rtx temp;
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;

	/* If op is known to have all lower bits zero, the result is zero.  */
	scalar_int_mode int_mode, int_op0_mode;
	if (!in_dest
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
			 SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
	    && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
		& GET_MODE_MASK (int_mode)) == 0)
	  return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;

    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, int_mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
				   GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (HWI_COMPUTABLE_MODE_P (mode))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      break;

    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;

    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (int_mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && (GET_MODE_PRECISION
		      (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
		      == (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));
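
      /* Worked example (illustrative commentary added here, not from the
	 original sources): with pow2 == 128 the field is 7 bits wide and
	 c == 64, so
	     (plus (xor (and X (const_int 127)) (const_int 64))
		   (const_int -64))
	 computes ((X & 127) ^ 64) - 64, the classic trick for sign-extending
	 a 7-bit value; the pair of shifts built above implements exactly
	 that sign extension.  */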
      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 gen_rtx_XOR (int_mode, XEXP (x, 0),
					      const1_rtx),
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);

      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
	    return temp;
	}

      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
	  && !side_effects_p (XEXP (x, 0)))
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
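
      /* E.g. (illustrative commentary added here): nonzero_bits proves that
	     (plus (ashift X (const_int 4)) (const_int 3))
	 has no overlapping one-bits between its operands, so it can be
	 rewritten as (ior (ashift X (const_int 4)) (const_int 3)); and
	 (plus X X) with no side effects becomes (ashift X (const_int 1)).  */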
      break;

    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
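
      /* For instance (illustrative commentary added here): with pow2 == 8,
	     (minus X (and X (const_int -8)))  ->  (and X (const_int 7)),
	 i.e. the C expression x - (x & -8) computes x & 7.  */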
      break;

    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try to simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem,
					XEXP (XEXP (x, 0), 1));
	}
      break;

    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
				     XEXP (x, 0), i);
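
      /* E.g. (illustrative commentary added here): with i == 2,
	     (udiv (lshiftrt X (const_int 3)) (const_int 4))
	 becomes (lshiftrt (lshiftrt X 3) 2), which simplify_shift_const
	 then merges into (lshiftrt X (const_int 5)).  */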
      break;

    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED:  case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     a NOT.

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */
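
	  /* For example (illustrative commentary added here): on a target
	     with STORE_FLAG_VALUE == 1,
		 (ne (zero_extract X (const_int 1) (const_int 3))
		     (const_int 0))
	     is just the one-bit extraction itself, and the corresponding
	     EQ becomes (xor <extraction> (const_int 1)).  */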
	  if (in_cond)
	    ;

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    return gen_lowpart (int_mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, int_mode,
					  gen_lowpart (int_mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    return gen_lowpart (int_mode, expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
	    }

	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE
	      && is_int_mode (mode, &int_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode)
	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	      && op1 == const0_rtx
	      && int_mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (int_mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }

	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;

    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      (HOST_WIDE_INT_1U
			       << exact_log2 (GET_MODE_UNIT_BITSIZE
					      (GET_MODE (x))))
			      - 1, 0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode, inner_mode;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;
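
  /* E.g. (illustrative commentary added here): with STORE_FLAG_VALUE == 1,
	 (if_then_else (lt A B) (const_int 1) (const_int 0))  ->  (lt A B)
     and with the arms swapped the reversed comparison (ge A B) is used.  */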
  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      unsigned HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  std::swap (true_rtx, false_rtx);
	}

      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
	{
	  /* If we are comparing against zero and the expression being
	     tested has only a single bit that might be nonzero, that is
	     its value when it is not equal to zero.  Similarly if it is
	     known to be -1 or 0.  */
	  if (true_code == EQ
	      && true_val == const0_rtx
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
	    {
	      false_code = EQ;
	      false_val = gen_int_mode (nzb, from_mode);
	    }
	  else if (true_code == EQ
		   && true_val == const0_rtx
		   && (num_sign_bit_copies (from, from_mode)
		       == GET_MODE_PRECISION (from_mode)))
	    {
	      false_code = EQ;
	      false_val = constm1_rtx;
	    }
	}

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }

  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }

  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;

  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }

  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
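
  /* For instance (illustrative commentary added here):
	 (if_then_else (ge A B) A B)   ->  (smax A B)
	 (if_then_else (ltu A B) A B)  ->  (umin A B)
     provided the comparison operands match the arms exactly.  */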
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 nor -1, but it isn't worth checking for.  */
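
  /* For example (illustrative commentary added here): with
     STORE_FLAG_VALUE == 1,
	 (if_then_else COND (plus Z (const_int 4)) Z)
     can be rewritten as (plus Z (mult COND (const_int 4))), since the
     multiplication contributes 4 when COND is 1 and 0 when COND is 0.  */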
  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      scalar_int_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}

      if (z)
	{
	  machine_mode cm = m;
	  if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
	      && GET_MODE (c1) != VOIDmode)
	    cm = GET_MODE (c1);
	  temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (MULT, cm, temp,
				      simplify_gen_binary (MULT, cm, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);

	  return temp;
	}
    }

  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
			    gen_lowpart (int_mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
      && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
	 == nonzero_bits (XEXP (cond, 0), inner_mode)
      && (i = exact_log2 (UINTVAL (true_rtx)
			  & GET_MODE_MASK (int_mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      if (inner_mode == int_mode)
	return val;
      else if (GET_MODE_PRECISION (inner_mode)
	       < GET_MODE_PRECISION (int_mode))
	return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
    }

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }

  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	{
	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
	    {
	      inner_compare = op0;
	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
	    }
	}
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}

      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else if (inner_compare
	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
	       && new_code == old_code
	       && op0 == XEXP (inner_compare, 0)
	       && op1 == XEXP (inner_compare, 1))
	compare_mode = GET_MODE (inner_compare);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif /* SELECT_CC_MODE */

      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, if we didn't previously have the same COMPARE we
	 want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machines where it is defined, this transformation is
     safe as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
	   / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
				       GET_MODE (SUBREG_REG (src)),
				       GET_MODE (src)))
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }

  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && partial_subreg_p (src)
      && subreg_lowpart_p (src))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
				     nonzero_bits (inner, inner_mode)))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.
     Don't do this for vector modes, as the transformation is incorrect.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && !VECTOR_MODE_P (GET_MODE (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
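
  /* For instance (illustrative commentary added here): on a target whose
     SImode loads zero-extend (load_extend_op returns ZERO_EXTEND),
	 (set (reg:DI R) (subreg:DI (mem:SI ADDR) 0))
     is rewritten as
	 (set (reg:DI R) (zero_extend:DI (mem:SI ADDR))).  */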
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */
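
  /* E.g. (illustrative commentary added here): with A known to be 0 or -1,
	 (set D (if_then_else (ne A 0) B C))
     can be computed without a conditional move as
	 (set D (ior (and A B) (and (not A) C))),
     since the mask A selects B when it is all ones and C when it is zero.  */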
  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
	  || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
	  == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1),
	  false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0),
	  false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1),
	  true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0),
	  true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, int_mode,
				  simplify_gen_binary (IOR, int_mode,
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
	  && CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
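
/* For example (illustrative commentary added here):
   distribute_and_simplify_rtx rewrites (and (ior A B) C) as
   (ior (and A C) (and B C)) and keeps the result only when the inverse
   distributive law then yields something genuinely simpler, so the
   rewrite never makes the expression larger.  */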
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
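
/* For instance (illustrative commentary added here):
       (zero_extend:SI (reg:QI R))
   expands to (and:SI (subreg:SI (reg:QI R) 0) (const_int 255)), and
       (sign_extract:SI X (const_int 8) (const_int 0))
   expands to (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
   when SImode is 32 bits wide.  */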
static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* fall through */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded.  If not for that, MEM's would very rarely be safe.

	 Reject modes bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
	return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
	  || !CONST_INT_P (XEXP (x, 2)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }

  /* We've rejected non-scalar operations by now.  */
  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (mode)
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
	  == 0))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
	return temp;
      else
	return x;
    }

  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when MODE is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));
    }
  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */
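
  /* Concretely (illustrative commentary added here): extracting an unsigned
     8-bit field at bit 4 of a 32-bit value uses
	 (lshiftrt:SI (ashift:SI X (const_int 20)) (const_int 24)):
     the left shift of 32 - 4 - 8 == 20 drops the bits above the field and
     the right shift of 32 - 8 == 24 moves it down to bit 0.  */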
  modewidth = GET_MODE_PRECISION (mode);
  if (modewidth >= pos + len)
    {
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
	return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
				  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
				  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, mode,
				  simplify_shift_const (NULL_RTX, LSHIFTRT,
							mode, XEXP (x, 0),
							pos),
				  (HOST_WIDE_INT_1U << len) - 1);
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */
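
/* For example (illustrative commentary added here): storing Y into an
   8-bit field at bit 8 of X,
       (set (zero_extract X (const_int 8) (const_int 8)) Y),
   becomes, in C terms,
       x = (x & ~(0xff << 8)) | ((y & 0xff) << 8);
   i.e. clear the field with an AND of the inverted mask, then IOR in the
   shifted, masked source.  */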
static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
          && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
        {
          inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
          len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
          pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
                              MAX_MODE_INT);
        }
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
               && CONST_INT_P (XEXP (SET_DEST (x), 1)))
        {
          inner = XEXP (SET_DEST (x), 0);
          len = INTVAL (XEXP (SET_DEST (x), 1));
          pos = XEXP (SET_DEST (x), 2);

          /* A constant position should stay within the width of INNER.  */
          if (CONST_INT_P (pos)
              && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
            break;

          if (BITS_BIG_ENDIAN)
            {
              if (CONST_INT_P (pos))
                pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
                               - INTVAL (pos));
              else if (GET_CODE (pos) == MINUS
                       && CONST_INT_P (XEXP (pos, 1))
                       && (INTVAL (XEXP (pos, 1))
                           == GET_MODE_PRECISION (GET_MODE (inner)) - len))
                /* If position is ADJUST - X, new position is X.  */
                pos = XEXP (pos, 0);
              else
                {
                  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
                  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
                                             gen_int_mode (prec - len,
                                                           GET_MODE (pos)),
                                             pos);
                }
            }
        }

      /* If the destination is a subreg that overwrites the whole of the inner
         register, we can move the subreg to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
               /* We need SUBREGs to compute nonzero_bits properly.  */
               && nonzero_sign_valid
               && !read_modify_subreg_p (SET_DEST (x)))
        {
          x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
                           gen_lowpart
                           (GET_MODE (SUBREG_REG (SET_DEST (x))),
                            SET_SRC (x)));
          continue;
        }
      else
        break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
        inner = SUBREG_REG (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
        {
          /* Don't do anything for vector or complex integral types.  */
          if (! FLOAT_MODE_P (GET_MODE (inner)))
            break;

          /* Try to find an integral mode to pun with.  */
          if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
              .exists (&compute_mode))
            break;

          inner = gen_lowpart (compute_mode, inner);
        }

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
        break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
        break;

      /* Now compute the equivalent expression.  Make a copy of INNER
         for the SET_DEST in case it is a MEM into which we will substitute;
         we don't want shared RTL in that case.  */
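      /* For example, storing an 8-bit value Y at bit 4 of INNER in SImode
         produces
           (set INNER
                (ior:SI (and:SI INNER
                                (not:SI (ashift:SI (const_int 255)
                                                   (const_int 4))))
                        (ashift:SI (and:SI Y (const_int 255))
                                   (const_int 4))))
         so the old field is cleared and the masked source is IORed into
         place.  */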
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
                           compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
                                     simplify_gen_unary (NOT, compute_mode,
                                       simplify_gen_binary (ASHIFT,
                                                            compute_mode,
                                                            mask, pos),
                                       compute_mode),
                                     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
                                    simplify_gen_binary (
                                      AND, compute_mode,
                                      gen_lowpart (compute_mode, SET_SRC (x)),
                                      mask),
                                    pos);

      x = gen_rtx_SET (copy_rtx (inner),
                       simplify_gen_binary (IOR, compute_mode,
                                            cleared, masked));
    }

  return x;
}

/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */
static rtx
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
                 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
                 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  scalar_int_mode wanted_inner_mode;
  scalar_int_mode wanted_inner_reg_mode = word_mode;
  scalar_int_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;

  if (GET_CODE (inner) == SUBREG
      && subreg_lowpart_p (inner)
      && (paradoxical_subreg_p (inner)
          /* If trying or potentially trying to extract
             bits outside of is_mode, don't look through
             non-paradoxical SUBREGs.  See PR82192.  */
          || (pos_rtx == NULL_RTX
              && pos + len <= GET_MODE_PRECISION (is_mode))))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
         consider just the QI as the memory to extract from.
         The subreg adds or removes high bits; its mode is
         irrelevant to the meaning of this extraction,
         since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
        is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
           && CONST_INT_P (XEXP (inner, 1))
           && pos_rtx == 0 && pos == 0
           && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
         (ashift X (const_int C)), where LEN > C.  Extract the
         least significant (LEN - C) bits of X, giving an rtx
         whose mode is MODE, then shift it left C times.  */
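      /* E.g. for LEN == 8 and C == 3, extract the low five bits of X
         and shift the result left by three; the three low bits of the
         original (ashift X 3) are known to be zero anyway.  */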
      new_rtx = make_extraction (mode, XEXP (inner, 0),
                                 0, 0, len - INTVAL (XEXP (inner, 1)),
                                 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
        return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE
           /* If trying or potentially trying to extract
              bits outside of is_mode, don't look through
              TRUNCATE.  See PR82192.  */
           && pos_rtx == NULL_RTX
           && pos + len <= GET_MODE_PRECISION (is_mode))
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode. For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */
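  /* For example, on a little-endian target the byte at bits 8..15 of
     (mem:SI ADDR) can be referenced simply as
     (mem:QI (plus ADDR (const_int 1))) with no extraction at all.  */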
  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
           && !MEM_P (inner)
           && (pos == 0 || REG_P (inner))
           && (inner_mode == tmode
               || !REG_P (inner)
               || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
               || reg_truncated_to_mode (tmode, inner))
           && (! in_dest
               || (REG_P (inner)
                   && have_insn_for (STRICT_LOW_PART, tmode))))
          || (MEM_P (inner) && pos_rtx == 0
              && (pos
                  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
                     : BITS_PER_UNIT)) == 0
              /* We can't do this if we are widening INNER_MODE (it
                 may not be aligned, for one thing).  */
              && !paradoxical_subreg_p (tmode, inner_mode)
              && (inner_mode == tmode
                  || (! mode_dependent_address_p (XEXP (inner, 0),
                                                  MEM_ADDR_SPACE (inner))
                      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
         field.  If the original and current mode are the same, we need not
         adjust the offset.  Otherwise, we do if bytes big endian.

         If INNER is not a MEM, get a piece consisting of just the field
         of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
        {
          HOST_WIDE_INT offset;

          /* POS counts from lsb, but make OFFSET count in memory order.  */
          if (BYTES_BIG_ENDIAN)
            offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
          else
            offset = pos / BITS_PER_UNIT;

          new_rtx = adjust_address_nv (inner, tmode, offset);
        }
      else if (REG_P (inner))
        {
          if (tmode != inner_mode)
            {
              /* We can't call gen_lowpart in a DEST since we
                 always want a SUBREG (see below) and it would sometimes
                 return a new hard register.  */
              if (pos || in_dest)
                {
                  unsigned int offset
                    = subreg_offset_from_lsb (tmode, inner_mode, pos);

                  /* Avoid creating invalid subregs, for example when
                     simplifying (x>>32)&255.  */
                  if (!validate_subreg (tmode, inner_mode, inner, offset))
                    return NULL_RTX;

                  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
                }
              else
                new_rtx = gen_lowpart (tmode, inner);
            }
          else
            new_rtx = inner;
        }
      else
        new_rtx = force_to_mode (inner, tmode,
                                 len >= HOST_BITS_PER_WIDE_INT
                                 ? HOST_WIDE_INT_M1U
                                 : (HOST_WIDE_INT_1U << len) - 1, 0);

      /* If this extraction is going into the destination of a SET,
         make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
        return (MEM_P (new_rtx) ? new_rtx
                : (GET_CODE (new_rtx) != SUBREG
                   ? gen_rtx_CLOBBER (tmode, const0_rtx)
                   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));

      if (mode == tmode)
        return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
        return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
                                         mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
         bit is not set, convert the extraction to the cheaper of
         sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
          && (HWI_COMPUTABLE_MODE_P (tmode)
              && ((nonzero_bits (new_rtx, tmode)
                   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (tmode)) >> 1))
                  == 0)))
        {
          rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
          rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

          /* Prefer ZERO_EXTENSION, since it gives more information to
             backends.  */
          if (set_src_cost (temp, mode, optimize_this_for_speed_p)
              <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
            return temp;
          return temp1;
        }

      /* Otherwise, sign- or zero-extend unless we already are in the
         proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
                             mode, new_rtx));
    }

  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* Unless INNER is not MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
          || (pos_rtx != 0 && len != 1)))
    return 0;

  enum extraction_pattern pattern = (in_dest ? EP_insv
                                     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  struct extraction_insn insn;
  if (get_best_reg_extraction_insn (&insn, pattern,
                                    GET_MODE_BITSIZE (inner_mode), mode))
    {
      wanted_inner_reg_mode = insn.struct_mode.require ();
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && partial_subreg_p (extraction_mode, mode))
    extraction_mode = mode;

  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
         natural alignment of the memory.  */
      wanted_inner_mode = smallest_int_mode_for_size (len);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
             > GET_MODE_BITSIZE (wanted_inner_mode))
        wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
    }

  orig_pos = pos;

  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
         BITS_BIG_ENDIAN style.  If position is constant, compute new
         position.  Otherwise, build subtraction.
         Note that POS is relative to the mode of the original argument.
         If it's a MEM we need to recompute POS relative to that.
         However, if we're extracting from (or inserting into) a register,
         we want to recompute POS relative to wanted_inner_mode.  */
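      /* E.g. with a 32-bit WIDTH, an 8-bit field at lsb-relative
         position 4 is at BITS_BIG_ENDIAN position 32 - 8 - 4 == 20.  */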
      int width = (MEM_P (inner)
                   ? GET_MODE_BITSIZE (is_mode)
                   : GET_MODE_BITSIZE (wanted_inner_mode));

      if (pos_rtx == 0)
        pos = width - len - pos;
      else
        pos_rtx
          = gen_rtx_MINUS (GET_MODE (pos_rtx),
                           gen_int_mode (width - len, GET_MODE (pos_rtx)),
                           pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
         Note that it can only be less than 0 if !MEM_P (inner).  */
    }

  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && partial_subreg_p (wanted_inner_mode, is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      int offset = 0;

      /* The computations below will be correct if the machine is big
         endian in both bits and bytes or little endian in bits and bytes.
         If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
         adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
          && paradoxical_subreg_p (is_mode, inner_mode))
        offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
                * GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
          && is_mode != wanted_inner_mode)
        offset = (GET_MODE_SIZE (is_mode)
                  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }

  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitly truncating
         the register unless TARGET_TRULY_NOOP_TRUNCATION.  */
      if (in_dest
          && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
                                             wanted_inner_mode))
        return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
          && (pos_rtx != 0
              || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
        return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
                             pos_rtx
                             || len + orig_pos >= HOST_BITS_PER_WIDE_INT
                             ? HOST_WIDE_INT_M1U
                             : (((HOST_WIDE_INT_1U << len) - 1)
                                << orig_pos),
                             0);
    }

  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.

     We dealt with constant rtxes earlier, so pos_rtx cannot
     have VOIDmode at this point.  */
  if (pos_rtx != 0
      && (GET_MODE_SIZE (pos_mode)
          > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
                                     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
         bit is not set, convert extraction to cheaper one - either
         SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
         cases.  */
      if (flag_expensive_optimizations
          && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
              && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
                   & ~(((unsigned HOST_WIDE_INT)
                        GET_MODE_MASK (GET_MODE (pos_rtx)))
                       >> 1))
                  == 0)))
        {
          rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
                                          GET_MODE (pos_rtx));

          /* Prefer ZERO_EXTENSION, since it gives more information to
             backends.  */
          if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
              < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
            temp = temp1;
        }
      pos_rtx = temp;
    }

  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
                             extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}

/* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
   can be commuted with any other operations in X.  Return X without
   that shift if so.  */

static rtx
extract_left_shift (scalar_int_mode mode, rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
         either the value being shifted if the shift count is equal to
         COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= count)
        return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
        return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
         make a new operation.  */
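      /* E.g. with COUNT == 3, (plus (ashift X (const_int 3)) (const_int 8))
         yields (plus X (const_int 1)); shifting that back left by three
         recreates the original value.  */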
      if (CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
          && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
        {
          HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
          return simplify_gen_binary (code, mode, tem,
                                      gen_int_mode (val, mode));
        }
      break;

    default:
      break;
    }

  return 0;
}

/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */

static rtx
make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
                             enum rtx_code in_code,
                             enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  scalar_int_mode inner_mode;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }

  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
         an address.  */
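      /* E.g. inside a MEM, (ashift X (const_int 2)) is rewritten as
         (mult X (const_int 4)), the canonical form for addresses.  */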
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) >= 0)
        {
          HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
          HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

          new_rtx = make_compound_operation (XEXP (x, 0), next_code);
          if (GET_CODE (new_rtx) == NEG)
            {
              new_rtx = XEXP (new_rtx, 0);
              multval = -multval;
            }
          multval = trunc_int_for_mode (multval, mode);
          new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
        }
      break;

    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
                                     XEXP (lhs, 1));
          new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
        }
      else if (GET_CODE (lhs) == MULT
               && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
                                     simplify_gen_unary (NEG, mode,
                                                         XEXP (lhs, 1),
                                                         mode));
          new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
        }
      else
        {
          SUBST (XEXP (x, 0), lhs);
          SUBST (XEXP (x, 1), rhs);
        }
      maybe_swap_commutative_operands (x);
      return x;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
                                     XEXP (rhs, 1));
          return simplify_gen_binary (PLUS, mode, tem, lhs);
        }
      else if (GET_CODE (rhs) == MULT
               && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
                                     simplify_gen_unary (NEG, mode,
                                                         XEXP (rhs, 1),
                                                         mode));
          return simplify_gen_binary (PLUS, mode, tem, lhs);
        }
      else
        {
          SUBST (XEXP (x, 0), lhs);
          SUBST (XEXP (x, 1), rhs);
          return x;
        }

    case AND:
      /* If the second operand is not a constant, we can't do anything
         with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
        break;

      /* If the constant is a power of two minus one and the first operand
         is a logical right shift, make an extraction.  */
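      /* E.g. (and (lshiftrt X (const_int 3)) (const_int 15)) becomes
         (zero_extract X (const_int 4) (const_int 3)), a four-bit field
         at bit 3.  */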
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
                                     i, 1, 0, in_code == COMPARE);
        }

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
               && subreg_lowpart_p (XEXP (x, 0))
               && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
                                          &inner_mode)
               && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
          new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
          new_rtx = make_extraction (inner_mode, new_rtx, 0,
                                     XEXP (inner_x0, 1),
                                     i, 1, 0, in_code == COMPARE);

          /* If we narrowed the mode when dropping the subreg, then we lose.  */
          if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
            new_rtx = NULL;

          /* If that didn't give anything, see if the AND simplifies on
             its own.  */
          if (!new_rtx && i >= 0)
            {
              new_rtx = make_compound_operation (XEXP (x, 0), next_code);
              new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
                                         0, in_code == COMPARE);
            }
        }
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
                || GET_CODE (XEXP (x, 0)) == IOR)
               && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
               && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          /* Apply the distributive law, and then try to make extractions.  */
          new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
                                    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
                                                 XEXP (x, 1)),
                                    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
                                                 XEXP (x, 1)));
          new_rtx = make_compound_operation (new_rtx, in_code);
        }

      /* If we have (and (rotate X C) M) and C is larger than the number
         of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
               && CONST_INT_P (XEXP (XEXP (x, 0), 1))
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
               && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx,
                                     (GET_MODE_PRECISION (mode)
                                      - INTVAL (XEXP (XEXP (x, 0), 1))),
                                     NULL_RTX, i, 1, 0, in_code == COMPARE);
        }

      /* On machines without logical shifts, if the operand of the AND is
         a logical shift and our mask turns off all the propagated sign
         bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
               && !have_insn_for (LSHIFTRT, mode)
               && have_insn_for (ASHIFTRT, mode)
               && CONST_INT_P (XEXP (XEXP (x, 0), 1))
               && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
               && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
               && mode_width <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

          mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
          if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
            SUBST (XEXP (x, 0),
                   gen_rtx_ASHIFTRT (mode,
                                     make_compound_operation (XEXP (XEXP (x,
                                                                          0),
                                                                    0),
                                                              next_code),
                                     XEXP (XEXP (x, 0), 1)));
        }

      /* If the constant is one less than a power of two, this might be
         representable by an extraction even if no shift is present.
         If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
         we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        new_rtx = make_extraction (mode,
                                   make_compound_operation (XEXP (x, 0),
                                                            next_code),
                                   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
         convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
               && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
        new_rtx = make_extraction (mode,
                                   make_compound_operation (XEXP (x, 0),
                                                            next_code),
                                   i, NULL_RTX, 1, 1, 0, 1);

      /* If the one operand is a paradoxical subreg of a register or memory and
         the constant (limited to the smaller mode) has only zero bits where
         the sub expression has known zero bits, this can be expressed as
         a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
        {
          rtx sub;

          sub = XEXP (XEXP (x, 0), 0);
          machine_mode sub_mode = GET_MODE (sub);
          if ((REG_P (sub) || MEM_P (sub))
              && GET_MODE_PRECISION (sub_mode) < mode_width)
            {
              unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
              unsigned HOST_WIDE_INT mask;

              /* original AND constant with all the known zero bits set */
              mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
              if ((mask & mode_mask) == mode_mask)
                {
                  new_rtx = make_compound_operation (sub, next_code);
                  new_rtx = make_extraction (mode, new_rtx, 0, 0,
                                             GET_MODE_PRECISION (sub_mode),
                                             1, 0, in_code == COMPARE);
                }
            }
        }

      break;

    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
         arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
          && ! have_insn_for (LSHIFTRT, mode)
          && mode_width <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
        {
          new_rtx = gen_rtx_ASHIFTRT (mode,
                                      make_compound_operation (XEXP (x, 0),
                                                               next_code),
                                      XEXP (x, 1));
          break;
        }

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
         this is a SIGN_EXTRACT.  */
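      /* E.g. in SImode, (ashiftrt (ashift FOO (const_int 24))
         (const_int 24)) is the low byte of FOO, sign-extended; it is
         rewritten as an extraction of 8 bits at position 0.  */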
      if (CONST_INT_P (rhs)
          && GET_CODE (lhs) == ASHIFT
          && CONST_INT_P (XEXP (lhs, 1))
          && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
          && INTVAL (XEXP (lhs, 1)) >= 0
          && INTVAL (rhs) < mode_width)
        {
          new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
          new_rtx = make_extraction (mode, new_rtx,
                                     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
                                     NULL_RTX, mode_width - INTVAL (rhs),
                                     code == LSHIFTRT, 0, in_code == COMPARE);
          break;
        }

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
         If so, try to merge the shifts into a SIGN_EXTEND.  We could
         also do this for some cases of SIGN_EXTRACT, but it doesn't
         seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
          && ! (GET_CODE (lhs) == SUBREG
                && (OBJECT_P (SUBREG_REG (lhs))))
          && CONST_INT_P (rhs)
          && INTVAL (rhs) >= 0
          && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
          && INTVAL (rhs) < mode_width
          && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
        new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
                                                                  next_code),
                                   0, NULL_RTX, mode_width - INTVAL (rhs),
                                   code == LSHIFTRT, 0, in_code == COMPARE);

      break;

    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
         narrowing the object and it has a different RTL code from
         what it originally did, do this SUBREG as a force_to_mode.  */
      {
        rtx inner = SUBREG_REG (x), simplified;
        enum rtx_code subreg_code = in_code;

        /* If the SUBREG is masking of a logical right shift,
           make an extraction.  */
        if (GET_CODE (inner) == LSHIFTRT
            && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
            && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
            && CONST_INT_P (XEXP (inner, 1))
            && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
            && subreg_lowpart_p (x))
          {
            new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
            int width = GET_MODE_PRECISION (inner_mode)
                        - INTVAL (XEXP (inner, 1));
            if (width > mode_width)
              width = mode_width;
            new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
                                       width, 1, 0, in_code == COMPARE);
            break;
          }

        /* If in_code is COMPARE, it isn't always safe to pass it through
           to the recursive make_compound_operation call.  */
        if (subreg_code == COMPARE
            && (!subreg_lowpart_p (x)
                || GET_CODE (inner) == SUBREG
                /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
                   is (const_int 0), rather than
                   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
                   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
                   for non-equality comparisons against 0 is not equivalent
                   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
                || (GET_CODE (inner) == AND
                    && CONST_INT_P (XEXP (inner, 1))
                    && partial_subreg_p (x)
                    && exact_log2 (UINTVAL (XEXP (inner, 1)))
                       >= GET_MODE_BITSIZE (mode) - 1)))
          subreg_code = SET;

        tem = make_compound_operation (inner, subreg_code);

        simplified
          = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
        if (simplified)
          tem = simplified;

        if (GET_CODE (tem) != GET_CODE (inner)
            && partial_subreg_p (x)
            && subreg_lowpart_p (x))
          {
            rtx newer
              = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

            /* If we have something other than a SUBREG, we might have
               done an expansion, so rerun ourselves.  */
            if (GET_CODE (newer) != SUBREG)
              newer = make_compound_operation (newer, in_code);

            /* force_to_mode can expand compounds.  If it just re-expanded
               the compound, use gen_lowpart to convert to the desired
               mode.  */
            if (rtx_equal_p (newer, x)
                /* Likewise if it re-expanded the compound only partially.
                   This happens for SUBREG of ZERO_EXTRACT if they extract
                   the same number of bits.  */
                || (GET_CODE (newer) == SUBREG
                    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
                        || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
                    && GET_CODE (inner) == AND
                    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
              return gen_lowpart (GET_MODE (x), tem);

            return newer;
          }

        if (simplified)
          return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}

/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */
static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  enum rtx_code next_code;
  rtx new_rtx, tem;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */
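  /* For instance, both operands of (mem (plus X Y)) are processed with
     IN_CODE == MEM, while the operands of (compare FOO (const_int 0))
     are processed with COMPARE and anything nested within them falls
     back to SET.  */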
  next_code = (code == MEM ? MEM
               : ((code == COMPARE || COMPARISON_P (x))
                  && XEXP (x, 1) == const0_rtx) ? COMPARE
               : in_code == COMPARE || in_code == EQ ? SET : in_code);

  scalar_int_mode mode;
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
    {
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
                                                 &next_code);
      if (new_rtx)
        return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                            new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
        return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
        new_rtx = make_compound_operation (XEXP (x, i), next_code);
        SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        {
          new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
          SUBST (XVECEXP (x, i, j), new_rtx);
        }

  maybe_swap_commutative_operands (x);
  return x;
}

/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
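    /* E.g. M == 0x70 gives POS == 4 and *PLEN == 3 (a three-bit field
       at bits 4..6), while M == 0x50 fails because 0x50 >> 4 == 5 is
       not one less than a power of two.  */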
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}

/* If X refers to a register that equals REG in value, replace these
   references with REG.  */
static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
        return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
                                   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
                                        GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
        return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
                                     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
        {
          if (rtx_equal_p (get_last_value (reg), x)
              || rtx_equal_p (reg, get_last_value (x)))
            return reg;
          else
            break;
        }

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (fmt[i] == 'e')
          {
            rtx op = canon_reg_for_combine (XEXP (x, i), reg);
            if (op != XEXP (x, i))
              {
                if (!copied)
                  {
                    copied = true;
                    x = copy_rtx (x);
                  }
                XEXP (x, i) = op;
              }
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              {
                rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
                if (op != XVECEXP (x, i, j))
                  {
                    if (!copied)
                      {
                        copied = true;
                        x = copy_rtx (x);
                      }
                    XVECEXP (x, i, j) = op;
                  }
              }
          }

      break;
    }

  return x;
}

/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && partial_subreg_p (mode, GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
        x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
                              x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}

/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
               int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT nonzero;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
              && have_insn_for (code, mode))
             ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && partial_subreg_p (mode, GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode)
    mask &= GET_MODE_MASK (op_mode);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
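  /* E.g. forcing (const_int 0x1234) to QImode under MASK 0xff simply
     yields (const_int 0x34).  */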
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
        return gen_int_mode (INTVAL (x) & mask, mode);
      else
        {
          x = GEN_INT (INTVAL (x) & mask);
          return gen_lowpart_common (mode, x);
        }
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && (partial_subreg_p (x)
          || (mask
              & GET_MODE_MASK (GET_MODE (x))
              & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  scalar_int_mode int_mode, xmode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    /* OP_MODE is either MODE or XMODE, so it must be a scalar
       integer too.  */
    return force_int_to_mode (x, int_mode, xmode,
                              as_a <scalar_int_mode> (op_mode),
                              mask, just_select);

  return gen_lowpart_or_truncate (mode, x);
}

/* Subroutine of force_to_mode that handles cases in which both X and
   the result are scalar integers.  MODE is the mode of the result,
   XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
   is preferred for simplified versions of X.  The other arguments
   are as for force_to_mode.  */

static rtx
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
                   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
                   int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  unsigned HOST_WIDE_INT fuller_mask;
  rtx op0, op1, temp;

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
                   - 1);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
         generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
        return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
         whose constant is the AND of that constant with MASK.  If it
         remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
        {
          x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
                                      mask & INTVAL (XEXP (x, 1)));
          xmode = op_mode;

          /* If X is still an AND, see if it is an AND with a mask that
             is just some low-order bits.  If so, and it is MASK, we don't
             need it.  */

          if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
              && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
            x = XEXP (x, 0);

          /* If it remains an AND, try making another AND with the bits
             in the mode mask that aren't in MASK turned on.  If the
             constant in the AND is wide enough, this might make a
             cheaper constant.  */

          if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
              && GET_MODE_MASK (xmode) != mask
              && HWI_COMPUTABLE_MODE_P (xmode))
            {
              unsigned HOST_WIDE_INT cval
                = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
              rtx y;

              y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
                                       gen_int_mode (cval, xmode));
              if (set_src_cost (y, xmode, optimize_this_for_speed_p)
                  < set_src_cost (x, xmode, optimize_this_for_speed_p))
                x = y;
            }

          break;
        }

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
         low-order bits (as in an alignment operation) and FOO is already
         aligned to that boundary, mask C1 to that boundary as well.
         This may eliminate that PLUS and, later, the AND.  */
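      /* E.g. under M == -8, (plus FOO (const_int 0x107)) becomes
         (plus FOO (const_int 0x100)) when the low three bits of FOO
         are known to be zero, since the AND discards bits 0..2.  */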
      {
        unsigned int width = GET_MODE_PRECISION (mode);
        unsigned HOST_WIDE_INT smask = mask;

        /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
           number, sign extend it.  */

        if (width < HOST_BITS_PER_WIDE_INT
            && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
          smask |= HOST_WIDE_INT_M1U << width;

        if (CONST_INT_P (XEXP (x, 1))
            && pow2p_hwi (- smask)
            && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
            && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
          return force_to_mode (plus_constant (xmode, XEXP (x, 0),
                                               (INTVAL (XEXP (x, 1)) & smask)),
                                mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
         create RTL matching a machine insn.  */
      if (code == MULT
          && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
              || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
          && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
              || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
          && REG_P (XEXP (XEXP (x, 0), 0))
          && REG_P (XEXP (XEXP (x, 1), 0)))
        return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
         most significant bit in MASK since carries from those bits will
         affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
         in the mask, then we may replace with (neg Y).  */
      if (CONST_INT_P (XEXP (x, 0))
          && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
        {
          x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
          return force_to_mode (x, mode, mask, next_select);
        }

      /* Similarly, if C contains every bit in the fuller_mask, then we may
         replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
          && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
        {
          x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
          return force_to_mode (x, mode, mask, next_select);
        }

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
         LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
         operation which may be a bitfield extraction.  Ensure that the
         constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
          && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && ((INTVAL (XEXP (XEXP (x, 0), 1))
               + floor_log2 (INTVAL (XEXP (x, 1))))
              < GET_MODE_PRECISION (xmode))
          && (UINTVAL (XEXP (x, 1))
              & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
        {
          temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
                               << INTVAL (XEXP (XEXP (x, 0), 1)),
                               xmode);
          temp = simplify_gen_binary (GET_CODE (x), xmode,
                                      XEXP (XEXP (x, 0), 0), temp);
          x = simplify_gen_binary (LSHIFTRT, xmode, temp,
                                   XEXP (XEXP (x, 0), 1));
          return force_to_mode (x, mode, mask, next_select);
        }

      /* fall through */

    binop:
      /* For most binary operations, just propagate into the operation and
         change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
         operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
          && GET_CODE (op1) == TRUNCATE)
        {
          op0 = XEXP (op0, 0);
          op1 = XEXP (op1, 0);
        }

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        {
          x = simplify_gen_binary (code, op_mode, op0, op1);
          xmode = op_mode;
        }
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
         However, we cannot do anything with shifts where we cannot
         guarantee that the counts are smaller than the size of the mode
         because such a count will have a different meaning in a
         wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
             && INTVAL (XEXP (x, 1)) >= 0
             && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
          && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
                && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
                    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
        break;

      /* If the shift count is a constant and we can do arithmetic in
         the mode of the shift, refine which bits we need.  Otherwise, use the
         conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
          && HWI_COMPUTABLE_MODE_P (op_mode))
        mask >>= INTVAL (XEXP (x, 1));
      else
        mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
                                     force_to_mode (XEXP (x, 0), op_mode,
                                                    mask, next_select));

      if (op_mode != xmode || op0 != XEXP (x, 0))
        {
          x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
          xmode = op_mode;
        }
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
         this shift constant is valid for the host, and we can do arithmetic
         in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && HWI_COMPUTABLE_MODE_P (op_mode))
        {
          rtx inner = XEXP (x, 0);
          unsigned HOST_WIDE_INT inner_mask;

          /* Select the mask of the bits we need for the shift operand.  */
          inner_mask = mask << INTVAL (XEXP (x, 1));

          /* We can only change the mode of the shift if we can do arithmetic
             in the mode of the shift and INNER_MASK is no wider than the
             width of X's mode.  */
          if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
            op_mode = xmode;

          inner = force_to_mode (inner, op_mode, inner_mask, next_select);

          if (xmode != op_mode || inner != XEXP (x, 0))
            {
              x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
              xmode = op_mode;
            }
        }

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
         shift and AND produces only copies of the sign bit (C2 is one less
         than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
          && CONST_INT_P (XEXP (x, 1))
          /* The shift puts one of the sign bit copies in the least significant
             bit.  */
          && ((INTVAL (XEXP (x, 1))
               + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
              >= GET_MODE_PRECISION (xmode))
          && pow2p_hwi (mask + 1)
          /* Number of bits left after the shift must be more than the mask
             needs.  */
          && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
              <= GET_MODE_PRECISION (xmode))
          /* Must be more sign bit copies than the mask needs.  */
          && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
              >= exact_log2 (mask + 1)))
        {
          int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
          x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
                                   gen_int_shift_amount (xmode, nbits));
        }
      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
         all, even if it has a variable count.  */
      if (val_signbit_p (xmode, mask))
        return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
         that are not copies of the sign bit.  We then have two cases:  If
         MASK only includes those bits, this can be a logical shift, which may
         allow simplifications.  If MASK is a single-bit field not within
         those bits, we are requesting a copy of the sign bit and hence can
         shift the sign bit to the appropriate location.  */
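      /* E.g. if MASK is the single bit 0x80 in SImode and that bit holds
         a copy of the sign bit, the whole (ashiftrt X C) is rewritten as
         (lshiftrt X (const_int 24)), which moves the sign bit to bit 7.  */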
      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT nonzero;
          int i;

          /* If the considered data is wider than HOST_WIDE_INT, we can't
             represent a mask for all its bits in a single scalar.
             But we only care about the lower bits, so calculate these.  */

          if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
            {
              nonzero = HOST_WIDE_INT_M1U;

              /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
                 is the number of bits a full-width mask would have set.
                 We need only shift if these are fewer than nonzero can
                 hold.  If not, we must keep all bits set in nonzero.  */

              if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
                  < HOST_BITS_PER_WIDE_INT)
                nonzero >>= INTVAL (XEXP (x, 1))
                            + HOST_BITS_PER_WIDE_INT
                            - GET_MODE_PRECISION (xmode);
            }
          else
            {
              nonzero = GET_MODE_MASK (xmode);
              nonzero >>= INTVAL (XEXP (x, 1));
            }

          if ((mask & ~nonzero) == 0)
            {
              x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
                                        XEXP (x, 0), INTVAL (XEXP (x, 1)));
              if (GET_CODE (x) != ASHIFTRT)
                return force_to_mode (x, mode, mask, next_select);
            }

          else if ((i = exact_log2 (mask)) >= 0)
            {
              x = simplify_shift_const
                  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
                   GET_MODE_PRECISION (xmode) - 1 - i);

              if (GET_CODE (x) != ASHIFTRT)
                return force_to_mode (x, mode, mask, next_select);
            }
        }

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
         even if the shift count isn't a constant.  */
      if (mask == 1)
        x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));

    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
         we don't care about, remove it.  Be sure the call above returned
         something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
          && CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && (INTVAL (XEXP (x, 1))
              <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
          && GET_CODE (XEXP (x, 0)) == ASHIFT
          && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
        return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
                              next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
         in the mode of X, compute where the bits we care about are.
         Otherwise, we can't do anything.  Don't change the mode of
         the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0)
        {
          temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
                                            xmode, gen_int_mode (mask, xmode),
                                            XEXP (x, 1));
          if (temp && CONST_INT_P (temp))
            x = simplify_gen_binary (code, xmode,
                                     force_to_mode (XEXP (x, 0), xmode,
                                                    INTVAL (temp), next_select),
                                     XEXP (x, 1));
        }
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
         won't change the low-order bit.  */
      if (mask == 1)
        return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
         MASK since carries from those bits will affect the bits we are
         interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
         same as the XOR case above.  Ensure that the constant we form is not
         wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
          && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
              < GET_MODE_PRECISION (xmode))
          && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
        {
          temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
          temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
          x = simplify_gen_binary (LSHIFTRT, xmode,
                                   temp, XEXP (XEXP (x, 0), 1));

          return force_to_mode (x, mode, mask, next_select);
        }

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
         use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
                                     force_to_mode (XEXP (x, 0), mode, mask,
                                                    next_select));
      if (op_mode != xmode || op0 != XEXP (x, 0))
        {
          x = simplify_gen_unary (code, op_mode, op0, op_mode);
          xmode = op_mode;
        }
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
         in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
         which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
          && XEXP (x, 1) == const0_rtx
          && GET_MODE (XEXP (x, 0)) == mode
          && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
          && (nonzero_bits (XEXP (x, 0), mode)
              == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
        return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
         written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (xmode,
                                     force_to_mode (XEXP (x, 1), mode,
                                                    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
                                     force_to_mode (XEXP (x, 2), mode,
                                                    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
        x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
                                  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
                                  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}

/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */
static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;
  scalar_int_mode int_mode;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }

  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
           && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
                                    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);
      cond0 = if_then_else_cond (op0, &true0, &false0);
      cond1 = if_then_else_cond (op1, &true1, &false1);

      if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
          && (REG_P (op0) || REG_P (op1)))
        {
          /* Try to enable a simplification by undoing work done by
             if_then_else_cond if it converted a REG into something more
             complex.  */
          if (REG_P (op0))
            {
              cond0 = 0;
              true0 = false0 = op0;
            }
          else
            {
              cond1 = 0;
              true1 = false1 = op1;
            }
        }

      if ((cond0 != 0 || cond1 != 0)
          && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
        {
          /* If if_then_else_cond returned zero, then true/false are the
             same rtl.  We must copy one of them to prevent invalid rtl
             sharing.  */
          if (cond0 == 0)
            true0 = copy_rtx (true0);
          else if (cond1 == 0)
            true1 = copy_rtx (true1);

          if (COMPARISON_P (x))
            {
              *ptrue = simplify_gen_relational (code, mode, VOIDmode,
                                                true0, true1);
              *pfalse = simplify_gen_relational (code, mode, VOIDmode,
                                                 false0, false1);
            }
          else
            {
              *ptrue = simplify_gen_binary (code, mode, true0, true1);
              *pfalse = simplify_gen_binary (code, mode, false0, false1);
            }

          return cond0 ? cond0 : cond1;
        }

      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
         operands is zero when the other is nonzero, and vice-versa,
         and STORE_FLAG_VALUE is 1 or -1.  */
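      /* E.g. with STORE_FLAG_VALUE == 1,
         (plus (mult (eq A B) C) (mult (ne A B) D))
         is C when A == B and D otherwise, so we return (eq A B) with
         *PTRUE == C and *PFALSE == D.  */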
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
	      || code == UMAX)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 1);
	  rtx op1 = XEXP (XEXP (x, 1), 1);

	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     code == MINUS
					     ? simplify_gen_unary (NEG, mode,
								   op1, mode)
					     : op1,
					     const_true_rtx);
	      return cond0;
	    }
	}

      /* Similarly for MULT, AND and UMIN, except that for these the result
	 is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == MULT || code == AND || code == UMIN)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = *pfalse = const0_rtx;
	      return cond0;
	    }
	}
    }

  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
	 canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
	return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
	{
	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
	  return XEXP (cond0, 0);
	}
      else
	return cond0;
    }

  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
  else if (code == SUBREG
	   && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
					  &false0)) != 0)
    {
      true0 = simplify_gen_subreg (mode, true0,
				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
	{
	  *ptrue = true0;
	  *pfalse = false0;
	  return cond0;
	}
    }

  /* If X is a constant, this isn't special and will cause confusions
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
	   || (is_a <scalar_int_mode> (mode, &int_mode)
	       && (num_sign_bit_copies (x, int_mode)
		   == GET_MODE_PRECISION (int_mode))))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}

/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */
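
/* Illustrative example (not from the original sources): with COND == GE,
   REG == R and VAL == (const_int 0), (abs R) simplifies to R via the ABS
   case below, and (smin R (const_int 0)) simplifies to (const_int 0) via
   the MIN/MAX handling.  */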
static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }

  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
	{
	  std::swap (val, reg);
	  cond = swap_condition (cond);
	}

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
	{
	  if (COMPARISON_P (x))
	    {
	      if (comparison_dominates_p (cond, code))
		return const_true_rtx;

	      code = reversed_comparison_code (x, NULL);
	      if (code != UNKNOWN
		  && comparison_dominates_p (cond, code))
		return const0_rtx;
	      else
		return x;
	    }
	  else if (code == SMAX || code == SMIN
		   || code == UMIN || code == UMAX)
	    {
	      int unsignedp = (code == UMIN || code == UMAX);

	      /* Do not reverse the condition when it is NE or EQ.
		 This is because we cannot conclude anything about
		 the value of 'SMAX (x, y)' when x is not equal to y,
		 but we can when x equals y.  */
	      if ((code == SMAX || code == UMAX)
		  && ! (cond == EQ || cond == NE))
		cond = reverse_condition (cond);

	      switch (cond)
		{
		case GE:   case GT:
		  return unsignedp ? x : XEXP (x, 1);
		case LE:   case LT:
		  return unsignedp ? x : XEXP (x, 0);
		case GEU:  case GTU:
		  return unsignedp ? XEXP (x, 1) : x;
		case LEU:  case LTU:
		  return unsignedp ? XEXP (x, 0) : x;
		default:
		  break;
		}
	    }
	}
    }
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
	{
	  /* We must simplify subreg here, before we lose track of the
	     original inner_mode.  */
	  new_rtx = simplify_subreg (GET_MODE (x), r,
				     inner_mode, SUBREG_BYTE (x));
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (SUBREG_REG (x), r);
	}

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
	{
	  /* We must simplify the zero_extend here, before we lose
	     track of the original inner_mode.  */
	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					      r, inner_mode);
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (XEXP (x, 0), r);
	}

      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
						cond, reg, val));
    }

  return x;
}

/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */
static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
	return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return 0;
      x = adjust_address_nv (x, GET_MODE (y),
			     byte_lowpart_offset (GET_MODE (y),
						  GET_MODE (x)));
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
     above cases are true.  */
  return 0;
}

/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */
static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;

  /* All the rules in this function are specific to scalar integers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return x;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */
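  /* For example (illustrative): a source of
     (and (rotate (const_int -2) POS) DEST) clears bit POS of DEST, so the
     whole SET can typically be rewritten as
     (set (zero_extract DEST (const_int 1) POS) (const_int 0)),
     which is the form make_extraction builds below.  */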
  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && partial_subreg_p (XEXP (src, 0))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const1_rtx);
      return x;
    }

  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = -1;
      else
	ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (dest, src);
	}
    }

  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */
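  /* Worked example (added for illustration): with C1 == 0xffff00ff in
     SImode, ~C1 selects bits 8..15, giving POS == 8 and LEN == 8; if OTHER
     is known zero wherever C1 is one, the SET becomes an assignment to
     (zero_extract DEST (const_int 8) (const_int 8)) with OTHER shifted
     right by 8 as the source.  */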
  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  scalar_int_mode src_mode = mode;
  if (GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
	   && paradoxical_subreg_p (XEXP (rhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
	   && CONST_INT_P (XEXP (rhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
	   && paradoxical_subreg_p (XEXP (lhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
  if (pos < 0
      || pos + len > GET_MODE_PRECISION (mode)
      || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, mode)) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
			   ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     src_mode, other, pos),
			       dest);
  src = force_to_mode (src, new_mode,
		       len >= HOST_BITS_PER_WIDE_INT
		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}

/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */
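
/* Concrete instances (illustrative): (ior (and A C) (and B C)) becomes
   (and (ior A B) C), and (plus (mult A C) (mult B C)) becomes
   (mult (plus A B) C).  Whether the inner and outer codes actually
   distribute is checked below.  */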
static rtx
apply_distributive_law (rtx x)
{
)
9801 enum rtx_code code
= GET_CODE (x
);
9802 enum rtx_code inner_code
;
9803 rtx lhs
, rhs
, other
;
9806 /* Distributivity is not true for floating point as it can change the
9807 value. So we don't do it unless -funsafe-math-optimizations. */
9808 if (FLOAT_MODE_P (GET_MODE (x
))
9809 && ! flag_unsafe_math_optimizations
)
9812 /* The outer operation can only be one of the following: */
9813 if (code
!= IOR
&& code
!= AND
&& code
!= XOR
9814 && code
!= PLUS
&& code
!= MINUS
)
9820 /* If either operand is a primitive we can't do anything, so get out
9822 if (OBJECT_P (lhs
) || OBJECT_P (rhs
))
9825 lhs
= expand_compound_operation (lhs
);
9826 rhs
= expand_compound_operation (rhs
);
9827 inner_code
= GET_CODE (lhs
);
9828 if (inner_code
!= GET_CODE (rhs
))
9831 /* See if the inner and outer operations distribute. */
9838 /* These all distribute except over PLUS. */
9839 if (code
== PLUS
|| code
== MINUS
)
9844 if (code
!= PLUS
&& code
!= MINUS
)
9849 /* This is also a multiply, so it distributes over everything. */
9852 /* This used to handle SUBREG, but this turned out to be counter-
9853 productive, since (subreg (op ...)) usually is not handled by
9854 insn patterns, and this "optimization" therefore transformed
9855 recognizable patterns into unrecognizable ones. Therefore the
9856 SUBREG case was removed from here.
9858 It is possible that distributing SUBREG over arithmetic operations
9859 leads to an intermediate result than can then be optimized further,
9860 e.g. by moving the outer SUBREG to the other side of a SET as done
9861 in simplify_set. This seems to have been the original intent of
9862 handling SUBREGs here.
9864 However, with current GCC this does not appear to actually happen,
9865 at least on major platforms. If some case is found where removing
9866 the SUBREG case here prevents follow-on optimizations, distributing
9867 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9873 /* Set LHS and RHS to the inner operands (A and B in the example
9874 above) and set OTHER to the common operand (C in the example).
9875 There is only one way to do this unless the inner operation is
9877 if (COMMUTATIVE_ARITH_P (lhs
)
9878 && rtx_equal_p (XEXP (lhs
, 0), XEXP (rhs
, 0)))
9879 other
= XEXP (lhs
, 0), lhs
= XEXP (lhs
, 1), rhs
= XEXP (rhs
, 1);
9880 else if (COMMUTATIVE_ARITH_P (lhs
)
9881 && rtx_equal_p (XEXP (lhs
, 0), XEXP (rhs
, 1)))
9882 other
= XEXP (lhs
, 0), lhs
= XEXP (lhs
, 1), rhs
= XEXP (rhs
, 0);
9883 else if (COMMUTATIVE_ARITH_P (lhs
)
9884 && rtx_equal_p (XEXP (lhs
, 1), XEXP (rhs
, 0)))
9885 other
= XEXP (lhs
, 1), lhs
= XEXP (lhs
, 0), rhs
= XEXP (rhs
, 1);
9886 else if (rtx_equal_p (XEXP (lhs
, 1), XEXP (rhs
, 1)))
9887 other
= XEXP (lhs
, 1), lhs
= XEXP (lhs
, 0), rhs
= XEXP (rhs
, 0);
9891 /* Form the new inner operation, seeing if it simplifies first. */
9892 tem
= simplify_gen_binary (code
, GET_MODE (x
), lhs
, rhs
);
9894 /* There is one exception to the general way of distributing:
9895 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9896 if (code
== XOR
&& inner_code
== IOR
)
9899 other
= simplify_gen_unary (NOT
, GET_MODE (x
), other
, GET_MODE (x
));
9902 /* We may be able to continuing distributing the result, so call
9903 ourselves recursively on the inner operation before forming the
9904 outer operation, which we return. */
9905 return simplify_gen_binary (inner_code
, GET_MODE (x
),
9906 apply_distributive_law (tem
), other
);

/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and (A (not B))) (and (B (not B)))),
   which then simplifies to (and (A (not B))).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */
static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}

/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
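
/* Example (illustrative only): in SImode with CONSTOP == 0xff and
   VAROP == (lshiftrt X (const_int 24)), all bits of VAROP above the low
   eight are known to be zero, so CONSTOP equals the nonzero-bits mask and
   the AND is dropped entirely, returning just VAROP.  */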
static rtx
simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in MODE.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0 && !side_effects_p (varop))
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    {
      scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
      return
	gen_lowpart
	  (mode,
	   apply_distributive_law
	     (simplify_gen_binary (GET_CODE (varop), varop_mode,
				   simplify_and_const_int (NULL_RTX,
							   varop_mode,
							   XEXP (varop, 0),
							   constop),
				   simplify_and_const_int (NULL_RTX,
							   varop_mode,
							   XEXP (varop, 1),
							   constop))));
    }

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}

/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */
static rtx
simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}

/* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   a shift, AND, or zero_extract, we can do better.  */
static rtx
reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
			      scalar_int_mode mode,
			      unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
	      && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
	 of mode, record_value_for_reg invoked nonzero_bits on the register
	 with nonzero_bits_mode (because last_set_mode is necessarily integral
	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
	 are all valid, hence in mode too since nonzero_bits_mode is defined
	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
	tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);

      *nonzero &= mask;
    }

  return NULL;
}

/* Given a reg X of mode XMODE, return the number of bits at the high-order
   end of X that are known to be equal to the sign bit.  X will be used
   in mode MODE; the returned value will always be between 1 and the
   number of bits in MODE.  */
static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
				     scalar_int_mode mode,
				     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}

/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */
static unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  scalar_int_mode int_mode;
  return (unsignedp
	  ? (is_a <scalar_int_mode> (mode, &int_mode)
	     && HWI_COMPUTABLE_MODE_P (int_mode)
	     ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
			       - floor_log2 (nonzero_bits (x, int_mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}

/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
   PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
   proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
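
/* Worked example (illustrative): merging OP1 == IOR, CONST1 == 0x0f into
   *POP0 == IOR, *PCONST0 == 0xf0 uses (a | b) | c == a | (b | c) and
   yields IOR with 0xff; merging an inner XOR into an outer IOR with equal
   constants uses (a ^ b) | b == a | b and leaves the IOR unchanged.  */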
static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
		 enum rtx_code op1, HOST_WIDE_INT const1,
		 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
	{
	case AND:
	  const0 &= const1;
	  break;
	case IOR:
	  const0 |= const1;
	  break;
	case XOR:
	  const0 ^= const1;
	  break;
	case PLUS:
	  const0 += const1;
	  break;
	case NEG:
	  op0 = UNKNOWN;
	  break;
	default:
	  break;
	}
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
	if (op1 == AND)
	  /* (a & b) | b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) | b == a | b */
	  ;
	break;

      case XOR:
	if (op1 == AND)
	  /* (a & b) ^ b == (~a) & b */
	  op0 = AND, *pcomp_p = 1;
	else /* op1 == IOR */
	  /* (a | b) ^ b == a & ~b */
	  op0 = AND, const0 = ~const0;
	break;

      case AND:
	if (op1 == IOR)
	  /* (a | b) & b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) & b == (~a) & b */
	  *pcomp_p = 1;
	break;

      default:
	break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
	   && op0 == AND)
    op0 = UNKNOWN;

  *pop0 = op0;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  return 1;
}

/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static scalar_int_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
		      scalar_int_mode orig_mode, scalar_int_mode mode,
		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
	 to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
	  > (unsigned) (GET_MODE_PRECISION (mode)
			- GET_MODE_PRECISION (orig_mode)))
	return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
	return mode;

      /* We can also widen if the bits brought in will be masked off.  This
	 operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
	{
	  int care_bits = low_bitmask_len (orig_mode, outer_const);

	  if (care_bits >= 0
	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
	    return mode;
	}
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}

/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
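
/* Typical simplifications performed here (illustrative): merging
   (lshiftrt (lshiftrt X 2) 3) into (lshiftrt X 5), or rewriting
   (lshiftrt (and X C) N) as (lshiftrt X N) with an outer AND of C >> N
   queued via merge_outer_ops.  */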
static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count, log2;
  machine_mode mode = result_mode;
  machine_mode shift_mode;
  scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure and truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
	return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
	{
	  unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
	  code = ROTATE;
	  count = bitsize - count;
	}

      shift_mode = result_mode;
      if (shift_mode != mode)
	{
	  /* We only change the modes of scalar shifts.  */
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
	  shift_mode = try_widen_shift_mode (code, varop, count,
					     int_result_mode, int_mode,
					     outer_op, outer_const);
	}

      scalar_int_mode shift_unit_mode
	= as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));

      /* Handle cases where the count is greater than the size of the mode
	 minus 1.  For ASHIFTRT, use the size minus one as the count (this can
	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
	 take the count modulo the size.  For other shifts, the result is
	 zero.

	 Since these shifts are being produced by the compiler by combining
	 multiple operations, each of which are defined, we know what the
	 result is supposed to be.  */
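      /* For instance (illustrative, SImode): an ASHIFTRT by 40 is treated
	 as a shift by 31, a ROTATE by 40 as a rotate by 8, and an ASHIFT
	 or LSHIFTRT by 40 yields zero (modulo any outer operation).  */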
      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
	{
	  if (code == ASHIFTRT)
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
	  else if (code == ROTATE || code == ROTATERT)
	    count %= GET_MODE_PRECISION (shift_unit_mode);
	  else
	    {
	      /* We can't simply return zero because there may be an
		 outer op.  */
	      varop = const0_rtx;
	      count = 0;
	      break;
	    }
	}

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
	 here would cause an infinite loop.  */
      if (complement_p)
	break;

      if (shift_mode == shift_unit_mode)
	{
	  /* An arithmetic right shift of a quantity known to be -1 or 0
	     is a no-op.  */
	  if (code == ASHIFTRT
	      && (num_sign_bit_copies (varop, shift_unit_mode)
		  == GET_MODE_PRECISION (shift_unit_mode)))
	    {
	      count = 0;
	      break;
	    }

	  /* If we are doing an arithmetic right shift and discarding all but
	     the sign bit copies, this is equivalent to doing a shift by the
	     bitsize minus one.  Convert it into that shift because it will
	     often allow other simplifications.  */

	  if (code == ASHIFTRT
	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
		  >= GET_MODE_PRECISION (shift_unit_mode)))
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;

	  /* We simplify the tests below and elsewhere by converting
	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
	     `make_compound_operation' will convert it to an ASHIFTRT for
	     those machines (such as VAX) that don't have an LSHIFTRT.  */
	  if (code == ASHIFTRT
	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
	      && val_signbit_known_clear_p (shift_unit_mode,
					    nonzero_bits (varop,
							  shift_unit_mode)))
	    code = LSHIFTRT;

	  if (((code == LSHIFTRT
		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
	       || (code == ASHIFT
		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		   && !((nonzero_bits (varop, shift_unit_mode) << count)
			& GET_MODE_MASK (shift_unit_mode))))
	      && !side_effects_p (varop))
	    varop = const0_rtx;
	}

      switch (GET_CODE (varop))
	{
	case SIGN_EXTEND:
	case ZERO_EXTEND:
	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  new_rtx = expand_compound_operation (varop);
	  if (new_rtx != varop)
	    {
	      varop = new_rtx;
	      continue;
	    }
	  break;

	case MEM:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);

	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
	     minus the width of a smaller mode, we can do this with a
	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && ! mode_dependent_address_p (XEXP (varop, 0),
					     MEM_ADDR_SPACE (varop))
	      && ! MEM_VOLATILE_P (varop)
	      && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
		  .exists (&tmode)))
	    {
	      new_rtx = adjust_address_nv (varop, tmode,
					   BYTES_BIG_ENDIAN ? 0
					   : count / BITS_PER_UNIT);

	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
				     : ZERO_EXTEND, int_mode, new_rtx);
	      count = 0;
	      continue;
	    }
	  break;

	case SUBREG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
	     the same number of words as what we've seen so far.  Then store
	     the widest mode in MODE.  */
	  if (subreg_lowpart_p (varop)
	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
	      && (unsigned int) ((GET_MODE_SIZE (inner_mode)
				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		 == mode_words
	      && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
	    {
	      varop = SUBREG_REG (varop);
	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
		mode = inner_mode;
	      continue;
	    }
	  break;

	case MULT:
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
	    {
	      rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
	      varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
					   XEXP (varop, 0), log2_rtx);
	      continue;
	    }
	  break;

	case UDIV:
	  /* Similar, for when divides are cheaper.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
	    {
	      rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
	      varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
					   XEXP (varop, 0), log2_rtx);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If we are extracting just the sign bit of an arithmetic
	     right shift, that shift is not needed.  However, the sign
	     bit of a wider mode may be different from what would be
	     interpreted as the sign bit in a narrower mode, so, if
	     the result is narrower, don't discard the shift.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
	      && (GET_MODE_UNIT_BITSIZE (result_mode)
		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* fall through */

	case LSHIFTRT:
	case ASHIFT:
	case ROTATE:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* Here we have two nested shifts.  The result is usually the
	     AND of a new shift with a mask.  We compute the result below.  */
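	  /* E.g. (illustrative): (lshiftrt:SI (ashift:SI X 3) 5) becomes
	     (and:SI (lshiftrt:SI X 2) (const_int 0x07ffffff)); the AND
	     masks off the bits the combined shifts would have cleared and
	     is queued as an outer operation through merge_outer_ops.  */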
	  if (CONST_INT_P (XEXP (varop, 1))
	      && INTVAL (XEXP (varop, 1)) >= 0
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode))
	    {
	      enum rtx_code first_code = GET_CODE (varop);
	      unsigned int first_count = INTVAL (XEXP (varop, 1));
	      unsigned HOST_WIDE_INT mask;
	      rtx mask_rtx;

	      /* We have one common special case.  We can't do any merging if
		 the inner code is an ASHIFTRT of a smaller mode.  However, if
		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
		 we can convert it to
		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
		 This simplifies certain SIGN_EXTEND operations.  */
	      if (code == ASHIFT && first_code == ASHIFTRT
		  && count == (GET_MODE_PRECISION (int_result_mode)
			       - GET_MODE_PRECISION (int_varop_mode)))
		{
		  /* C3 has the low-order C1 bits zero.  */

		  mask = GET_MODE_MASK (int_mode)
			 & ~((HOST_WIDE_INT_1U << first_count) - 1);

		  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
						  XEXP (varop, 0), mask);
		  varop = simplify_shift_const (NULL_RTX, ASHIFT,
						int_result_mode, varop, count);
		  count = first_count;
		  code = ASHIFTRT;
		  continue;
		}

	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
		 than C1 high-order bits equal to the sign bit, we can convert
		 this to either an ASHIFT or an ASHIFTRT depending on the
		 two counts.

		 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */

	      if (code == ASHIFTRT && first_code == ASHIFT
		  && int_varop_mode == shift_unit_mode
		  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
		      > first_count))
		{
		  varop = XEXP (varop, 0);
		  count -= first_count;
		  if (count < 0)
		    {
		      count = -count;
		      code = ASHIFT;
		    }

		  continue;
		}

	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
		 we can only do this if FIRST_CODE is also ASHIFTRT.

		 We can't do the case when CODE is ROTATE and FIRST_CODE is
		 ASHIFTRT.

		 If the mode of this shift is not the mode of the outer shift,
		 we can't do this if either shift is a right shift or ROTATE.

		 Finally, we can't do any of these if the mode is too wide
		 unless the codes are the same.

		 Handle the case where the shift codes are the same
		 first.  */

	      if (code == first_code)
		{
		  if (int_varop_mode != int_result_mode
		      && (code == ASHIFTRT || code == LSHIFTRT
			  || code == ROTATE))
		    break;

		  count += first_count;
		  varop = XEXP (varop, 0);
		  continue;
		}

	      if (code == ASHIFTRT
		  || (code == ROTATE && first_code == ASHIFTRT)
		  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
		  || (int_varop_mode != int_result_mode
		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
			  || first_code == ROTATE
			  || code == ROTATE)))
		break;

	      /* To compute the mask to apply after the shift, shift the
		 nonzero bits of the inner shift the same way the
		 outer shift will.  */

	      mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
				       int_result_mode);
	      rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
	      mask_rtx
		= simplify_const_binary_operation (code, int_result_mode,
						   mask_rtx, count_rtx);

	      /* Give up if we can't compute an outer operation to use.  */
	      if (mask_rtx == 0
		  || !CONST_INT_P (mask_rtx)
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					int_result_mode, &complement_p))
		break;

	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
	      if ((code == ASHIFTRT || code == LSHIFTRT)
		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
		count += first_count;
	      else
		count -= first_count;

	      /* If COUNT is positive, the new shift is usually CODE,
		 except for the two exceptions below, in which case it is
		 FIRST_CODE.  If the count is negative, FIRST_CODE should
		 always be used.  */
	      if (count > 0
		  && ((first_code == ROTATE && code == ASHIFT)
		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
		code = first_code;
	      else if (count < 0)
		code = first_code, count = -count;

	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (A << B << C) for any shift, we can convert this to
	     (A << C << B).  This wins if A is a constant.  Only try this if
	     B is not a constant.  */

	  else if (GET_CODE (varop) == code
		   && CONST_INT_P (XEXP (varop, 0))
		   && !CONST_INT_P (XEXP (varop, 1)))
	    {
	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
		 sure the result will be masked.  See PR70222.  */
	      if (code == LSHIFTRT
		  && int_mode != int_result_mode
		  && !merge_outer_ops (&outer_op, &outer_const, AND,
				       GET_MODE_MASK (int_result_mode)
				       >> orig_count, int_result_mode,
				       &complement_p))
		break;
	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
		 up outer sign extension (often left and right shift) is
		 hardly more efficient than the original.  See PR70429.  */
	      if (code == ASHIFTRT && int_mode != int_result_mode)
		break;

	      rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
	      rtx new_rtx = simplify_const_binary_operation (code, int_mode,
							     XEXP (varop, 0),
							     count_rtx);
	      varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
	      count = 0;
	      continue;
	    }
	  break;

	case NOT:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Make this fit the case below.  */
	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
	  continue;

	case IOR:
	case AND:
	case XOR:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have an (le X 0) operation.  If we have an arithmetic shift
	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }

	  /* If we have (shift (logical)), move the logical to the outside
	     to allow it to possibly combine with another logical and the
	     shift to combine with another shift.  This also canonicalizes to
	     what a ZERO_EXTRACT looks like.  Also, some machines have
	     (and (shift)) insns.  */

	  if (CONST_INT_P (XEXP (varop, 1))
	      /* We can't do this if we have (ashiftrt (xor)) and the
		 constant has its sign bit set in shift_unit_mode with
		 shift_unit_mode wider than result_mode.  */
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && int_result_mode != shift_unit_mode
		   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					  shift_unit_mode) < 0)
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   gen_int_shift_amount (int_result_mode, count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we can't do that, try to simplify the shift in each arm of the
	     logical expression, make a new logical expression, and apply
	     the inverse distributive law.  This also can't be done for
	     (ashiftrt (xor)) where we've widened the shift and the constant
	     changes the sign bit.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && int_result_mode != shift_unit_mode
		   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					  shift_unit_mode) < 0))
	    {
	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
					      XEXP (varop, 0), count);
	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
					      XEXP (varop, 1), count);

	      varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);

	      count = 0;
	      continue;
	    }
	  break;

	case EQ:
10975 /* The following rules apply only to scalars. */
10976 if (shift_mode
!= shift_unit_mode
)
10978 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
10980 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10981 says that the sign bit can be tested, FOO has mode MODE, C is
10982 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10983 that may be nonzero. */
10984 if (code
== LSHIFTRT
10985 && XEXP (varop
, 1) == const0_rtx
10986 && GET_MODE (XEXP (varop
, 0)) == int_result_mode
10987 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
10988 && HWI_COMPUTABLE_MODE_P (int_result_mode
)
10989 && STORE_FLAG_VALUE
== -1
10990 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1
10991 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1,
10992 int_result_mode
, &complement_p
))
10994 varop
= XEXP (varop
, 0);
11001 /* The following rules apply only to scalars. */
11002 if (shift_mode
!= shift_unit_mode
)
11004 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
11006 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11007 than the number of bits in the mode is equivalent to A. */
11008 if (code
== LSHIFTRT
11009 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
11010 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1)
11012 varop
= XEXP (varop
, 0);
11017 /* NEG commutes with ASHIFT since it is multiplication. Move the
11018 NEG outside to allow shifts to combine. */
11020 && merge_outer_ops (&outer_op
, &outer_const
, NEG
, 0,
11021 int_result_mode
, &complement_p
))
11023 varop
= XEXP (varop
, 0);
11029 /* The following rules apply only to scalars. */
11030 if (shift_mode
!= shift_unit_mode
)
11032 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
11034 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11035 is one less than the number of bits in the mode is
11036 equivalent to (xor A 1). */
11037 if (code
== LSHIFTRT
11038 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
11039 && XEXP (varop
, 1) == constm1_rtx
11040 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1
11041 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1,
11042 int_result_mode
, &complement_p
))
11045 varop
= XEXP (varop
, 0);
11049 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11050 that might be nonzero in BAR are those being shifted out and those
11051 bits are known zero in FOO, we can replace the PLUS with FOO.
11052 Similarly in the other operand order. This code occurs when
11053 we are computing the size of a variable-size array. */
11055 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
11056 && count
< HOST_BITS_PER_WIDE_INT
11057 && nonzero_bits (XEXP (varop
, 1), int_result_mode
) >> count
== 0
11058 && (nonzero_bits (XEXP (varop
, 1), int_result_mode
)
11059 & nonzero_bits (XEXP (varop
, 0), int_result_mode
)) == 0)
11061 varop
= XEXP (varop
, 0);
11064 else if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
11065 && count
< HOST_BITS_PER_WIDE_INT
11066 && HWI_COMPUTABLE_MODE_P (int_result_mode
)
11067 && (nonzero_bits (XEXP (varop
, 0), int_result_mode
)
11069 && (nonzero_bits (XEXP (varop
, 0), int_result_mode
)
11070 & nonzero_bits (XEXP (varop
, 1), int_result_mode
)) == 0)
11072 varop
= XEXP (varop
, 1);
11076 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11078 && CONST_INT_P (XEXP (varop
, 1))
11079 && (new_rtx
= simplify_const_binary_operation
11080 (ASHIFT
, int_result_mode
,
11081 gen_int_mode (INTVAL (XEXP (varop
, 1)), int_result_mode
),
11082 gen_int_shift_amount (int_result_mode
, count
))) != 0
11083 && CONST_INT_P (new_rtx
)
11084 && merge_outer_ops (&outer_op
, &outer_const
, PLUS
,
11085 INTVAL (new_rtx
), int_result_mode
,
11088 varop
= XEXP (varop
, 0);
11092 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11093 signbit', and attempt to change the PLUS to an XOR and move it to
11094 the outer operation as is done above in the AND/IOR/XOR case
11095 leg for shift(logical). See details in logical handling above
11096 for reasoning in doing so. */
11097 if (code
== LSHIFTRT
11098 && CONST_INT_P (XEXP (varop
, 1))
11099 && mode_signbit_p (int_result_mode
, XEXP (varop
, 1))
11100 && (new_rtx
= simplify_const_binary_operation
11101 (code
, int_result_mode
,
11102 gen_int_mode (INTVAL (XEXP (varop
, 1)), int_result_mode
),
11103 gen_int_shift_amount (int_result_mode
, count
))) != 0
11104 && CONST_INT_P (new_rtx
)
11105 && merge_outer_ops (&outer_op
, &outer_const
, XOR
,
11106 INTVAL (new_rtx
), int_result_mode
,
11109 varop
= XEXP (varop
, 0);
          break;

        case MINUS:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

          /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
             with C the size of VAROP - 1 and the shift is logical if
             STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
             we have a (gt X 0) operation.  If the shift is arithmetic with
             STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
             we have a (neg (gt X 0)) operation.  */

          if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
              && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
              && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
              && (code == LSHIFTRT || code == ASHIFTRT)
              && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
              && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
              && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
            {
              count = 0;
              varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
                                  const0_rtx);

              if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
                varop = gen_rtx_NEG (int_varop_mode, varop);

              continue;
            }
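          /* For illustration, in SImode with STORE_FLAG_VALUE == 1:
             (lshiftrt (minus (ashiftrt X 31) X) 31) is 1 exactly when
             (ashiftrt X 31) - X is negative, which happens only for X > 0,
             so the whole expression is (gt X 0).  */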
          break;

        case TRUNCATE:
          /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
             if the truncate does not affect the value.  */
          if (code == LSHIFTRT
              && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
              && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
              && (INTVAL (XEXP (XEXP (varop, 0), 1))
                  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
                      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
            {
              rtx varop_inner = XEXP (varop, 0);
              int new_count = count + INTVAL (XEXP (varop_inner, 1));
              rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
                                                        new_count);
              varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
                                              XEXP (varop_inner, 0),
                                              new_count_rtx);
              varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
              count = 0;
              continue;
            }
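          /* E.g. (lshiftrt:QI (truncate:QI (lshiftrt:SI X 24)) 2) becomes
             (truncate:QI (lshiftrt:SI X 26)): the inner shift has already
             discarded every bit the truncation could lose (24 >= 32 - 8),
             so the two shift counts can simply be added.  */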
          break;

        default:
          break;
        }

      break;
    }

  shift_mode = result_mode;
  if (shift_mode != mode)
    {
      /* We only change the modes of scalar shifts.  */
      int_mode = as_a <scalar_int_mode> (mode);
      int_result_mode = as_a <scalar_int_mode> (result_mode);
      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
                                         int_mode, outer_op, outer_const);
    }

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop,
                             gen_int_shift_amount (shift_mode, count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    /* We only change the modes of scalar shifts.  */
    x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
                                x, GET_MODE_MASK (result_mode) >> orig_count);
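  /* E.g. an LSHIFTRT of QImode VAROP by 3 that was widened to SImode must
     be masked with GET_MODE_MASK (QImode) >> 3 == 0x1f afterwards, since a
     QImode shift would have cleared the top three result bits itself.  */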
  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      int_result_mode = as_a <scalar_int_mode> (result_mode);

      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
          && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
        outer_const = trunc_int_for_mode (outer_const, int_result_mode);

      if (outer_op == AND)
        x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
      else if (outer_op == SET)
        {
          /* This means that we have determined that the result is
             equivalent to a constant.  This should be rare.  */
          if (!side_effects_p (x))
            x = GEN_INT (outer_const);
        }
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
        x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
      else
        x = simplify_gen_binary (outer_op, int_result_mode, x,
                                 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
                      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop,
                             gen_int_shift_amount (GET_MODE (varop), count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);

  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
          && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
        return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
        fputs ("Failed to match this instruction:\n", dump_file);
      else
        fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
        if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
          {
            if (i != pos)
              SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
            pos++;
          }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
        pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (insn_code_number < 0)
            fputs ("Failed to match this instruction:\n", dump_file);
          else
            fputs ("Successfully matched this instruction:\n", dump_file);
          print_rtl_single (dump_file, pat);
        }
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets, these will be killed by followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
                                     rtvec_alloc (GET_CODE (pat) == PARALLEL
                                                  ? (XVECLEN (pat, 0)
                                                     + num_clobbers_to_add)
                                                  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
        for (i = 0; i < XVECLEN (pat, 0); i++)
          XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
        XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
           i < XVECLEN (newpat, 0); i++)
        {
          if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
              && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
            return -1;
          if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
            {
              gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
              notes = alloc_reg_note (REG_UNUSED,
                                      XEXP (XVECEXP (newpat, 0, i), 0), notes);
            }
        }
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fputs ("Instruction not appropriate for target.",
                   dump_file);

          /* Callers expect recog_for_combine to strip
             clobbers from the pattern on failure.  */
          pat = pat_without_clobbers;
          notes = NULL_RTX;

          insn_code_number = -1;
        }

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */

static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      scalar_int_mode mode, inner_mode;
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
        continue;
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
          && CONST_INT_P (XEXP (x, 1))
          && CONST_INT_P (XEXP (x, 2))
          && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
          && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
        {
          size = INTVAL (XEXP (x, 1));

          int start = INTVAL (XEXP (x, 2));
          if (BITS_BIG_ENDIAN)
            start = GET_MODE_PRECISION (inner_mode) - size - start;

          if (start != 0)
            x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
                                  gen_int_shift_amount (inner_mode, start));
          else
            x = XEXP (x, 0);

          if (mode != inner_mode)
            x = gen_lowpart_SUBREG (mode, x);
        }
      else if (GET_CODE (x) == ZERO_EXTEND
               && GET_CODE (XEXP (x, 0)) == SUBREG
               && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
               && !paradoxical_subreg_p (XEXP (x, 0))
               && subreg_lowpart_p (XEXP (x, 0)))
        {
          inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
          size = GET_MODE_PRECISION (inner_mode);
          x = SUBREG_REG (XEXP (x, 0));
          if (GET_MODE (x) != mode)
            x = gen_lowpart_SUBREG (mode, x);
        }
      else if (GET_CODE (x) == ZERO_EXTEND
               && REG_P (XEXP (x, 0))
               && HARD_REGISTER_P (XEXP (x, 0))
               && can_change_dest_mode (XEXP (x, 0), 0, mode))
        {
          inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
          size = GET_MODE_PRECISION (inner_mode);
          x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
        }
      else
        continue;

      if (!(GET_CODE (x) == LSHIFTRT
            && CONST_INT_P (XEXP (x, 1))
            && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
        {
          wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
          x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
        }

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  scalar_int_mode mode;
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
        offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
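/* As a sketch of what change_zero_ext produces (little-endian bit numbering
   assumed): a source (zero_extract:SI X (const_int 8) (const_int 4)) becomes
   (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)), and a ZERO_EXTRACT
   destination is rewritten as the mask-out/shift/IOR arithmetic on the full
   register built above.  */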
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx set = XVECEXP (pat, 0, i);
          if (GET_CODE (set) == SET)
            changed |= change_zero_ext (set);
        }
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
        undo_to_marker (marker);
    }

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */

static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  unsigned int osize = GET_MODE_SIZE (omode);
  unsigned int isize = GET_MODE_SIZE (imode);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
      && ! (CONST_SCALAR_INT_P (x) || isize == osize))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
         further below, we need to adjust the known mode and size of
         x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
        return x;

      isize = GET_MODE_SIZE (imode);
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      /* Refuse to work on a volatile memory ref or one with a mode-dependent
         address.  */
      if (MEM_VOLATILE_P (x)
          || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
        goto fail;

      /* If we want to refer to something bigger than the original memref,
         generate a paradoxical subreg instead.  That will force a reload
         of the original memref X.  */
      if (paradoxical_subreg_p (omode, imode))
        return gen_rtx_SUBREG (omode, x, 0);

      poly_int64 offset = byte_lowpart_offset (omode, imode);
      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
        {
          imode = int_mode_for_mode (omode).require ();
          x = gen_lowpart_common (imode, x);
          if (x == NULL)
            goto fail;
        }
      res = lowpart_subreg (omode, x, imode);
      if (res)
        return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
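/* For instance, gen_lowpart_for_combine (QImode, (reg:SI 100)) normally
   yields (subreg:QI (reg:SI 100) 0) on a little-endian target, while an
   input it cannot handle comes back as (clobber (const_int 0)); the
   caller's pattern then simply fails to match instead of aborting.  */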
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */

static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
                        rtx op0, rtx *pop1)
{
  scalar_int_mode int_mode;
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
          || code == LT || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
      && (nonzero_bits (op0, int_mode)
          == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
          || code == GEU || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
        {
          const_op -= 1;
          code = LE;
          /* ... fall through to LE case below.  */
          gcc_fallthrough ();
        }
      else
        break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
        {
          const_op += 1;
          code = LT;
        }

      /* If we are doing a <= 0 comparison on a value known to have
         a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
               && is_a <scalar_int_mode> (mode, &int_mode)
               && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
               && (nonzero_bits (op0, int_mode)
                   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
               == 0)
        code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
        {
          const_op -= 1;
          code = GT;
          /* ... fall through to GT below.  */
          gcc_fallthrough ();
        }
      else
        break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
        {
          const_op += 1;
          code = GE;
        }

      /* If we are doing a > 0 comparison on a value known to have
         a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
               && is_a <scalar_int_mode> (mode, &int_mode)
               && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
               && (nonzero_bits (op0, int_mode)
                   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
               == 0)
        code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
        {
          const_op -= 1;
          code = LEU;
          /* ... fall through ...  */
          gcc_fallthrough ();
        }
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
               && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
               && ((unsigned HOST_WIDE_INT) const_op
                   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
        {
          const_op = 0;
          code = GE;
          break;
        }
      else
        break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
        code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
               && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
               && ((unsigned HOST_WIDE_INT) const_op
                   == ((HOST_WIDE_INT_1U
                        << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
        {
          const_op = 0;
          code = GE;
        }
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
        {
          const_op -= 1;
          code = GTU;
          /* ... fall through ...  */
          gcc_fallthrough ();
        }
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
               && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
               && ((unsigned HOST_WIDE_INT) const_op
                   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
        {
          const_op = 0;
          code = LT;
          break;
        }
      else
        break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
        code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
               && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
               && ((unsigned HOST_WIDE_INT) const_op
                   == (HOST_WIDE_INT_1U
                       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
        {
          const_op = 0;
          code = LT;
        }
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
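/* Example of the canonicalization above: for (eq (and X 8) (const_int 8)),
   where nonzero_bits shows only bit 3 of (and X 8) can be set, the result
   is NE with *POP1 zeroed, i.e. (ne (and X 8) (const_int 0)); similarly
   (ltu X (const_int 1)) becomes (eq X (const_int 0)).  */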
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  scalar_int_mode mode, inner_mode, tmode;
  opt_scalar_int_mode tmode_iter;

  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
         so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
          && code != GTU && code != GEU && code != LTU && code != LEU
          && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == ASHIFT
          && GET_CODE (XEXP (op1, 0)) == ASHIFT
          && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
          && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
          && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
          && (is_a <scalar_int_mode>
              (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
          && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
          && CONST_INT_P (XEXP (op0, 1))
          && XEXP (op0, 1) == XEXP (op1, 1)
          && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
          && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
          && (INTVAL (XEXP (op0, 1))
              == (GET_MODE_PRECISION (mode)
                  - GET_MODE_PRECISION (inner_mode))))
        {
          op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
          op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
        }

      /* If both operands are the same constant shift, see if we can ignore the
         shift.  We can if the shift is a rotate or if the bits shifted out of
         this shift are known to be zero for both inputs and if the type of
         comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
          && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
          && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
              || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
                  && (code != GT && code != LT && code != GE && code != LE))
              || (GET_CODE (op0) == ASHIFTRT
                  && (code != GTU && code != LTU
                      && code != GEU && code != LEU)))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) >= 0
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
          && XEXP (op0, 1) == XEXP (op1, 1))
        {
          machine_mode mode = GET_MODE (op0);
          unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          int shift_count = INTVAL (XEXP (op0, 1));

          if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
            mask &= (mask >> shift_count) << shift_count;
          else if (GET_CODE (op0) == ASHIFT)
            mask = (mask & (mask << shift_count)) >> shift_count;

          if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
              && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
            op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
          else
            break;
        }
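      /* E.g. (eq (rotate X 5) (rotate Y 5)) can become (eq X Y), and
         (ltu (lshiftrt X 2) (lshiftrt Y 2)) can become (ltu X Y) provided
         nonzero_bits shows the two low-order bits of both X and Y are
         already zero.  */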
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
         SUBREGs are of the same mode, and, in both cases, the AND would
         be redundant if the comparison was done in the narrower mode,
         do the comparison in the narrower mode (e.g., we are AND'ing with 1
         and the operand's possibly nonzero bits are 0xffffff01; in that case
         if we only care about QImode, we don't need the AND).  This case
         occurs if the output mode of an scc insn is not SImode and
         STORE_FLAG_VALUE == 1 (e.g., the 386).

         Similarly, check for a case where the AND's are ZERO_EXTEND
         operations from some narrower mode even though a SUBREG is not
         present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
               && CONST_INT_P (XEXP (op0, 1))
               && CONST_INT_P (XEXP (op1, 1)))
        {
          rtx inner_op0 = XEXP (op0, 0);
          rtx inner_op1 = XEXP (op1, 0);
          HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
          int changed = 0;

          if (paradoxical_subreg_p (inner_op0)
              && GET_CODE (inner_op1) == SUBREG
              && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
              && (GET_MODE (SUBREG_REG (inner_op0))
                  == GET_MODE (SUBREG_REG (inner_op1)))
              && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
                                        GET_MODE (SUBREG_REG (inner_op0)))) == 0
              && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
                                        GET_MODE (SUBREG_REG (inner_op1)))) == 0)
            {
              op0 = SUBREG_REG (inner_op0);
              op1 = SUBREG_REG (inner_op1);

              /* The resulting comparison is always unsigned since we masked
                 off the original sign bit.  */
              code = unsigned_condition (code);

              changed = 1;
            }

          else if (c0 == c1)
            FOR_EACH_MODE_UNTIL (tmode,
                                 as_a <scalar_int_mode> (GET_MODE (op0)))
              if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
                {
                  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
                  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
                  code = unsigned_condition (code);
                  changed = 1;
                  break;
                }

          if (! changed)
            break;
        }

      /* If both operands are NOT, we can strip off the outer operation
         and adjust the comparison code for swapped operands; similarly for
         NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
               || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
                   && (code == EQ || code == NE)))
        op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
        break;
    }

  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */
  while (CONST_INT_P (op1))
    {
      machine_mode raw_mode = GET_MODE (op0);
      scalar_int_mode int_mode;
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
         CCmode, and the floating-point modes.  An exception is that we
         can handle VOIDmode if OP0 is a COMPARE or a comparison
         operation.  */

      if (GET_MODE_CLASS (raw_mode) != MODE_INT
          && ! (raw_mode == VOIDmode
                && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
        break;

      /* Try to simplify the compare to constant, possibly changing the
         comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, raw_mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
                               || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
         MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p
          && is_a <scalar_int_mode> (raw_mode, &int_mode)
          && HWI_COMPUTABLE_MODE_P (int_mode))
        op0 = force_to_mode (op0, int_mode,
                             HOST_WIDE_INT_1U
                             << (GET_MODE_PRECISION (int_mode) - 1),
                             0);

      if (COMPARISON_P (op0))
        {
          /* We can't do anything if OP0 is a condition code value, rather
             than an actual data value.  */
          if (const_op != 0
              || CC0_P (XEXP (op0, 0))
              || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
            break;

          /* Get the two operands being compared.  */
          if (GET_CODE (XEXP (op0, 0)) == COMPARE)
            tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
          else
            tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

          /* Check for the cases where we simply want the result of the
             earlier test or the opposite of that result.  */
          if (code == NE || code == EQ
              || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
                  && (code == LT || code == GE)))
            {
              enum rtx_code new_code;
              if (code == LT || code == NE)
                new_code = GET_CODE (op0);
              else
                new_code = reversed_comparison_code (op0, NULL);

              if (new_code != UNKNOWN)
                {
                  code = new_code;
                  op0 = tem;
                  op1 = tem1;
                  continue;
                }
            }
          break;
        }

      if (raw_mode == VOIDmode)
        break;
      scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
      /* Now try cases based on the opcode of OP0.  If none of the cases
         does a "continue", we exit this loop immediately after the
         switch.  */

      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      switch (GET_CODE (op0))
        {
        case ZERO_EXTRACT:
          /* If we are extracting a single bit from a variable position in
             a constant that has only a single bit set and are comparing it
             with zero, we can convert this into an equality comparison
             between the position and the location of the single bit.  */
          /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
             have already reduced the shift count modulo the word size.  */
          if (!SHIFT_COUNT_TRUNCATED
              && CONST_INT_P (XEXP (op0, 0))
              && XEXP (op0, 1) == const1_rtx
              && equality_comparison_p && const_op == 0
              && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
            {
              if (BITS_BIG_ENDIAN)
                i = BITS_PER_WORD - 1 - i;

              op0 = XEXP (op0, 2);
              op1 = GEN_INT (i);
              const_op = i;

              /* Result is nonzero iff shift count is equal to I.  */
              code = reverse_condition (code);
              continue;
            }

          /* Fall through.  */

        case SIGN_EXTRACT:
          tem = expand_compound_operation (op0);
          if (tem != op0)
            {
              op0 = tem;
              continue;
            }
          break;

        case NOT:
          /* If testing for equality, we can take the NOT of the constant.  */
          if (equality_comparison_p
              && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
            {
              op0 = XEXP (op0, 0);
              op1 = tem;
              continue;
            }

          /* If just looking at the sign bit, reverse the sense of the
             comparison.  */
          if (sign_bit_comparison_p)
            {
              op0 = XEXP (op0, 0);
              code = (code == GE ? LT : GE);
              continue;
            }
          break;

        case NEG:
          /* If testing for equality, we can take the NEG of the constant.  */
          if (equality_comparison_p
              && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
            {
              op0 = XEXP (op0, 0);
              op1 = tem;
              continue;
            }

          /* The remaining cases only apply to comparisons with zero.  */
          if (const_op != 0)
            break;

          /* When X is ABS or is known positive,
             (neg X) is < 0 if and only if X != 0.  */

          if (sign_bit_comparison_p
              && (GET_CODE (XEXP (op0, 0)) == ABS
                  || (mode_width <= HOST_BITS_PER_WIDE_INT
                      && (nonzero_bits (XEXP (op0, 0), mode)
                          & (HOST_WIDE_INT_1U << (mode_width - 1)))
                      == 0)))
            {
              op0 = XEXP (op0, 0);
              code = (code == LT ? NE : EQ);
              continue;
            }

          /* If we have NEG of something whose two high-order bits are the
             same, we know that "(-a) < 0" is equivalent to "a > 0".  */
          if (num_sign_bit_copies (op0, mode) >= 2)
            {
              op0 = XEXP (op0, 0);
              code = swap_condition (code);
              continue;
            }
          break;

        case ROTATE:
          /* If we are testing equality and our count is a constant, we
             can perform the inverse operation on our RHS.  */
          if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
              && (tem = simplify_binary_operation (ROTATERT, mode,
                                                   op1, XEXP (op0, 1))) != 0)
            {
              op0 = XEXP (op0, 0);
              op1 = tem;
              continue;
            }

          /* If we are doing a < 0 or >= 0 comparison, it means we are testing
             a particular bit.  Convert it to an AND of a constant of that
             bit.  This will be converted into a ZERO_EXTRACT.  */
          if (const_op == 0 && sign_bit_comparison_p
              && CONST_INT_P (XEXP (op0, 1))
              && mode_width <= HOST_BITS_PER_WIDE_INT)
            {
              op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
                                            (HOST_WIDE_INT_1U
                                             << (mode_width - 1
                                                 - INTVAL (XEXP (op0, 1)))));
              code = (code == LT ? NE : EQ);
              continue;
            }

          /* Fall through.  */
        case ABS:
          /* ABS is ignorable inside an equality comparison with zero.  */
          if (const_op == 0 && equality_comparison_p)
            {
              op0 = XEXP (op0, 0);
              continue;
            }
          break;

        case SIGN_EXTEND:
          /* Can simplify (compare (zero/sign_extend FOO) CONST) to
             (compare FOO CONST) if CONST fits in FOO's mode and we
             are either testing inequality or have an unsigned
             comparison with ZERO_EXTEND or a signed comparison with
             SIGN_EXTEND.  But don't do it if we don't have a compare
             insn of the given mode, since we'd have to revert it
             later on, and then we wouldn't know whether to sign- or
             zero-extend.  */
          if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
              && ! unsigned_comparison_p
              && HWI_COMPUTABLE_MODE_P (mode)
              && trunc_int_for_mode (const_op, mode) == const_op
              && have_insn_for (COMPARE, mode))
            {
              op0 = XEXP (op0, 0);
              continue;
            }
          break;

        case SUBREG:
          /* Check for the case where we are comparing A - C1 with C2, that is

               (subreg:MODE (plus (A) (-C1))) op (C2)

             with C1 a constant, and try to lift the SUBREG, i.e. to do the
             comparison in the wider mode.  One of the following two conditions
             must be true in order for this to be valid:

               1. The mode extension results in the same bit pattern being added
                  on both sides and the comparison is equality or unsigned.  As
                  C2 has been truncated to fit in MODE, the pattern can only be
                  all 0s or all 1s.

               2. The mode extension results in the sign bit being copied on
                  each side.

             The difficulty here is that we have predicates for A but not for
             (A - C1) so we need to check that C1 is within proper bounds so
             as to perturb A as little as possible.  */

          if (mode_width <= HOST_BITS_PER_WIDE_INT
              && subreg_lowpart_p (op0)
              && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
                                         &inner_mode)
              && GET_MODE_PRECISION (inner_mode) > mode_width
              && GET_CODE (SUBREG_REG (op0)) == PLUS
              && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
            {
              rtx a = XEXP (SUBREG_REG (op0), 0);
              HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

              if ((c1 > 0
                   && (unsigned HOST_WIDE_INT) c1
                      < HOST_WIDE_INT_1U << (mode_width - 1)
                   && (equality_comparison_p || unsigned_comparison_p)
                   /* (A - C1) zero-extends if it is positive and sign-extends
                      if it is negative, C2 both zero- and sign-extends.  */
                   && (((nonzero_bits (a, inner_mode)
                         & ~GET_MODE_MASK (mode)) == 0
                        && const_op >= 0)
                       /* (A - C1) sign-extends if it is positive and 1-extends
                          if it is negative, C2 both sign- and 1-extends.  */
                       || (num_sign_bit_copies (a, inner_mode)
                           > (unsigned int) (GET_MODE_PRECISION (inner_mode)
                                             - mode_width)
                           && const_op < 0)))
                  || ((unsigned HOST_WIDE_INT) c1
                      < HOST_WIDE_INT_1U << (mode_width - 2)
                      /* (A - C1) always sign-extends, like C2.  */
                      && num_sign_bit_copies (a, inner_mode)
                         > (unsigned int) (GET_MODE_PRECISION (inner_mode)
                                           - (mode_width - 1))))
                {
                  op0 = SUBREG_REG (op0);
                  continue;
                }
            }

          /* If the inner mode is narrower and we are extracting the low part,
             we can treat the SUBREG as if it were a ZERO_EXTEND.  */
          if (paradoxical_subreg_p (op0))
            ;
          else if (subreg_lowpart_p (op0)
                   && GET_MODE_CLASS (mode) == MODE_INT
                   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
                   && (code == NE || code == EQ)
                   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
                   && !paradoxical_subreg_p (op0)
                   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
                       & ~GET_MODE_MASK (mode)) == 0)
            {
              /* Remove outer subregs that don't do anything.  */
              tem = gen_lowpart (inner_mode, op1);

              if ((nonzero_bits (tem, inner_mode)
                   & ~GET_MODE_MASK (mode)) == 0)
                {
                  op0 = SUBREG_REG (op0);
                  op1 = tem;
                  continue;
                }
              break;
            }
          else
            break;

          /* FALLTHROUGH */
        case ZERO_EXTEND:
          if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
              && (unsigned_comparison_p || equality_comparison_p)
              && HWI_COMPUTABLE_MODE_P (mode)
              && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
              && const_op >= 0
              && have_insn_for (COMPARE, mode))
            {
              op0 = XEXP (op0, 0);
              continue;
            }
          break;

        case PLUS:
          /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
             this for equality comparisons due to pathological cases involving
             overflows.  */
          if (equality_comparison_p
              && (tem = simplify_binary_operation (MINUS, mode,
                                                   op1, XEXP (op0, 1))) != 0)
            {
              op0 = XEXP (op0, 0);
              op1 = tem;
              continue;
            }

          /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
          if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
              && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
            {
              op0 = XEXP (XEXP (op0, 0), 0);
              code = (code == LT ? EQ : NE);
              continue;
            }
          break;

        case MINUS:
          /* We used to optimize signed comparisons against zero, but that
             was incorrect.  Unsigned comparisons against zero (GTU, LEU)
             arrive here as equality comparisons, or (GEU, LTU) are
             optimized away.  No need to special-case them.  */

          /* (eq (minus A B) C) -> (eq A (plus B C)) or
             (eq B (minus A C)), whichever simplifies.  We can only do
             this for equality comparisons due to pathological cases involving
             overflows.  */
          if (equality_comparison_p
              && (tem = simplify_binary_operation (PLUS, mode,
                                                   XEXP (op0, 1), op1)) != 0)
            {
              op0 = XEXP (op0, 0);
              op1 = tem;
              continue;
            }

          if (equality_comparison_p
              && (tem = simplify_binary_operation (MINUS, mode,
                                                   XEXP (op0, 0), op1)) != 0)
            {
              op0 = XEXP (op0, 1);
              op1 = tem;
              continue;
            }

          /* The sign bit of (minus (ashiftrt X C) X), where C is the number
             of bits in X minus 1, is one iff X > 0.  */
          if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
              && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
              && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
              && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
            {
              op0 = XEXP (op0, 1);
              code = (code == GE ? LE : GT);
              continue;
            }
          break;

        case XOR:
          /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
             if C is zero or B is a constant.  */
          if (equality_comparison_p
              && (tem = simplify_binary_operation (XOR, mode,
                                                   XEXP (op0, 1), op1)) != 0)
            {
              op0 = XEXP (op0, 0);
              op1 = tem;
              continue;
            }
          break;

        case IOR:
          /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
             iff X <= 0.  */
          if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
              && XEXP (XEXP (op0, 0), 1) == constm1_rtx
              && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
            {
              op0 = XEXP (op0, 1);
              code = (code == GE ? GT : LE);
              continue;
            }
          break;
        case AND:
          /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
             will be converted to a ZERO_EXTRACT later.  */
          if (const_op == 0 && equality_comparison_p
              && GET_CODE (XEXP (op0, 0)) == ASHIFT
              && XEXP (XEXP (op0, 0), 0) == const1_rtx)
            {
              op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
                                      XEXP (XEXP (op0, 0), 1));
              op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
              continue;
            }

          /* If we are comparing (and (lshiftrt X C1) C2) for equality with
             zero and X is a comparison and C1 and C2 describe only bits set
             in STORE_FLAG_VALUE, we can compare with X.  */
          if (const_op == 0 && equality_comparison_p
              && mode_width <= HOST_BITS_PER_WIDE_INT
              && CONST_INT_P (XEXP (op0, 1))
              && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
              && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
              && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
              && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
            {
              mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
                      << INTVAL (XEXP (XEXP (op0, 0), 1)));
              if ((~STORE_FLAG_VALUE & mask) == 0
                  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
                      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
                          && COMPARISON_P (tem))))
                {
                  op0 = XEXP (XEXP (op0, 0), 0);
                  continue;
                }
            }

          /* If we are doing an equality comparison of an AND of a bit equal
             to the sign bit, replace this with a LT or GE comparison of
             the underlying value.  */
          if (equality_comparison_p
              && const_op == 0
              && CONST_INT_P (XEXP (op0, 1))
              && mode_width <= HOST_BITS_PER_WIDE_INT
              && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
                  == HOST_WIDE_INT_1U << (mode_width - 1)))
            {
              op0 = XEXP (op0, 0);
              code = (code == EQ ? GE : LT);
              continue;
            }

          /* If this AND operation is really a ZERO_EXTEND from a narrower
             mode, the constant fits within that mode, and this is either an
             equality or unsigned comparison, try to do this comparison in
             the narrower mode.

             Note that in:

               (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
               -> (ne:DI (reg:SI 4) (const_int 0))

             unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
             known to hold a value of the required mode the
             transformation is invalid.  */
          if ((equality_comparison_p || unsigned_comparison_p)
              && CONST_INT_P (XEXP (op0, 1))
              && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
                                   & GET_MODE_MASK (mode))
                                  + 1)) >= 0
              && const_op >> i == 0
              && int_mode_for_size (i, 1).exists (&tmode))
            {
              op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
              continue;
            }

          /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
             fits in both M1 and M2 and the SUBREG is either paradoxical
             or represents the low part, permute the SUBREG and the AND
             and try again.  */
          if (GET_CODE (XEXP (op0, 0)) == SUBREG
              && CONST_INT_P (XEXP (op0, 1)))
            {
              unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
              /* Require an integral mode, to avoid creating something like
                 (AND:SF ...).  */
              if ((is_a <scalar_int_mode>
                   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
                  /* It is unsafe to commute the AND into the SUBREG if the
                     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
                     not defined.  As originally written the upper bits
                     have a defined value due to the AND operation.
                     However, if we commute the AND inside the SUBREG then
                     they no longer have defined values and the meaning of
                     the code has been changed.
                     Also C1 should not change value in the smaller mode,
                     see PR67028 (a positive C1 can become negative in the
                     smaller mode, so that the AND does no longer mask the
                     upper bits).  */
                  && ((WORD_REGISTER_OPERATIONS
                       && mode_width > GET_MODE_PRECISION (tmode)
                       && mode_width <= BITS_PER_WORD
                       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
                      || (mode_width <= GET_MODE_PRECISION (tmode)
                          && subreg_lowpart_p (XEXP (op0, 0))))
                  && mode_width <= HOST_BITS_PER_WIDE_INT
                  && HWI_COMPUTABLE_MODE_P (tmode)
                  && (c1 & ~mask) == 0
                  && (c1 & ~GET_MODE_MASK (tmode)) == 0
                  && c1 != mask
                  && c1 != GET_MODE_MASK (tmode))
                {
                  op0 = simplify_gen_binary (AND, tmode,
                                             SUBREG_REG (XEXP (op0, 0)),
                                             gen_int_mode (c1, tmode));
                  op0 = gen_lowpart (mode, op0);
                  continue;
                }
            }

          /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
          if (const_op == 0 && equality_comparison_p
              && XEXP (op0, 1) == const1_rtx
              && GET_CODE (XEXP (op0, 0)) == NOT)
            {
              op0 = simplify_and_const_int (NULL_RTX, mode,
                                            XEXP (XEXP (op0, 0), 0), 1);
              code = (code == NE ? EQ : NE);
              continue;
            }

          /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
             (eq (and (lshiftrt X) 1) 0).
             Also handle the case where (not X) is expressed using xor.  */
          if (const_op == 0 && equality_comparison_p
              && XEXP (op0, 1) == const1_rtx
              && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
            {
              rtx shift_op = XEXP (XEXP (op0, 0), 0);
              rtx shift_count = XEXP (XEXP (op0, 0), 1);

              if (GET_CODE (shift_op) == NOT
                  || (GET_CODE (shift_op) == XOR
                      && CONST_INT_P (XEXP (shift_op, 1))
                      && CONST_INT_P (shift_count)
                      && HWI_COMPUTABLE_MODE_P (mode)
                      && (UINTVAL (XEXP (shift_op, 1))
                          == HOST_WIDE_INT_1U
                             << INTVAL (shift_count))))
                {
                  op0
                    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
                  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
                  code = (code == NE ? EQ : NE);
                  continue;
                }
            }
          break;

        case ASHIFT:
          /* If we have (compare (ashift FOO N) (const_int C)) and
             the high order N bits of FOO (N+1 if an inequality comparison)
             are known to be zero, we can do this by comparing FOO with C
             shifted right N bits so long as the low-order N bits of C are
             zero.  */
          if (CONST_INT_P (XEXP (op0, 1))
              && INTVAL (XEXP (op0, 1)) >= 0
              && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
                  < HOST_BITS_PER_WIDE_INT)
              && (((unsigned HOST_WIDE_INT) const_op
                   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
                      - 1)) == 0)
              && mode_width <= HOST_BITS_PER_WIDE_INT
              && (nonzero_bits (XEXP (op0, 0), mode)
                  & ~(mask >> (INTVAL (XEXP (op0, 1))
                               + ! equality_comparison_p))) == 0)
            {
              /* We must perform a logical shift, not an arithmetic one,
                 as we want the top N bits of C to be zero.  */
              unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

              temp >>= INTVAL (XEXP (op0, 1));
              op1 = gen_int_mode (temp, mode);
              op0 = XEXP (op0, 0);
              continue;
            }

          /* If we are doing a sign bit comparison, it means we are testing
             a particular bit.  Convert it to the appropriate AND.  */
          if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
              && mode_width <= HOST_BITS_PER_WIDE_INT)
            {
              op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
                                            (HOST_WIDE_INT_1U
                                             << (mode_width - 1
                                                 - INTVAL (XEXP (op0, 1)))));
              code = (code == LT ? NE : EQ);
              continue;
            }
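          /* E.g. (lt (ashift X 10) 0) in SImode really tests bit 21 of X,
             so OP0 becomes (and X (const_int 0x200000)) and the code
             becomes NE.  */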
          /* If this is an equality comparison with zero and we are shifting
             the low bit to the sign bit, we can convert this to an AND of the
             low-order bit.  */
          if (const_op == 0 && equality_comparison_p
              && CONST_INT_P (XEXP (op0, 1))
              && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
            {
              op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
              continue;
            }
          break;

        case ASHIFTRT:
          /* If this is an equality comparison with zero, we can do this
             as a logical shift, which might be much simpler.  */
          if (equality_comparison_p && const_op == 0
              && CONST_INT_P (XEXP (op0, 1)))
            {
              op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
                                          XEXP (op0, 0),
                                          INTVAL (XEXP (op0, 1)));
              continue;
            }

          /* If OP0 is a sign extension and CODE is not an unsigned comparison,
             do the comparison in a narrower mode.  */
          if (! unsigned_comparison_p
              && CONST_INT_P (XEXP (op0, 1))
              && GET_CODE (XEXP (op0, 0)) == ASHIFT
              && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
              && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
                  .exists (&tmode))
              && (((unsigned HOST_WIDE_INT) const_op
                   + (GET_MODE_MASK (tmode) >> 1) + 1)
                  <= GET_MODE_MASK (tmode)))
            {
              op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
              continue;
            }

          /* Likewise if OP0 is a PLUS of a sign extension with a
             constant, which is usually represented with the PLUS
             between the shifts.  */
          if (! unsigned_comparison_p
              && CONST_INT_P (XEXP (op0, 1))
              && GET_CODE (XEXP (op0, 0)) == PLUS
              && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
              && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
              && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
              && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
                  .exists (&tmode))
              && (((unsigned HOST_WIDE_INT) const_op
                   + (GET_MODE_MASK (tmode) >> 1) + 1)
                  <= GET_MODE_MASK (tmode)))
            {
              rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
              rtx add_const = XEXP (XEXP (op0, 0), 1);
              rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
                                                   add_const, XEXP (op0, 1));

              op0 = simplify_gen_binary (PLUS, tmode,
                                         gen_lowpart (tmode, inner),
                                         new_const);
              continue;
            }

          /* FALLTHROUGH */
        case LSHIFTRT:
          /* If we have (compare (xshiftrt FOO N) (const_int C)) and
             the low order N bits of FOO are known to be zero, we can do this
             by comparing FOO with C shifted left N bits so long as no
             overflow occurs.  Even if the low order N bits of FOO aren't known
             to be zero, if the comparison is >= or < we can use the same
             optimization and for > or <= by setting all the low
             order N bits in the comparison constant.  */
          if (CONST_INT_P (XEXP (op0, 1))
              && INTVAL (XEXP (op0, 1)) > 0
              && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
              && mode_width <= HOST_BITS_PER_WIDE_INT
              && (((unsigned HOST_WIDE_INT) const_op
                   + (GET_CODE (op0) != LSHIFTRT
                      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
                         + 1)
                      : 0))
                  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
            {
              unsigned HOST_WIDE_INT low_bits
                = (nonzero_bits (XEXP (op0, 0), mode)
                   & ((HOST_WIDE_INT_1U
                       << INTVAL (XEXP (op0, 1))) - 1));
              if (low_bits == 0 || !equality_comparison_p)
                {
                  /* If the shift was logical, then we must make the condition
                     unsigned.  */
                  if (GET_CODE (op0) == LSHIFTRT)
                    code = unsigned_condition (code);

                  const_op = (unsigned HOST_WIDE_INT) const_op
                             << INTVAL (XEXP (op0, 1));
                  if (low_bits != 0
                      && (code == GT || code == GTU
                          || code == LE || code == LEU))
                    const_op
                      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
                  op1 = GEN_INT (const_op);
                  op0 = XEXP (op0, 0);
                  continue;
                }
            }

          /* If we are using this shift to extract just the sign bit, we
             can replace this with an LT or GE comparison.  */
          if (const_op == 0
              && (equality_comparison_p || sign_bit_comparison_p)
              && CONST_INT_P (XEXP (op0, 1))
              && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
            {
              op0 = XEXP (op0, 0);
              code = (code == NE || code == GT ? LT : GE);
              continue;
            }
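          /* E.g. (eq (lshiftrt X 31) 0) in SImode only asks whether the
             sign bit of X is clear, so it becomes (ge X 0); likewise
             (ne (lshiftrt X 31) 0) becomes (lt X 0).  */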
          break;

        default:
          break;
        }

      break;
    }

  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
        care bits and we can assume they have any convenient value.  So
        making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
        In this case the upper bits of op0 are undefined.  We should not make
        the simplification in that case as we do not know the contents of
        those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
        In that case we know those bits are zeros or ones.  We must also be
        sure that they are the same as the upper bits of op1.

        We can never remove a SUBREG for a non-equality comparison because
        the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && is_int_mode (GET_MODE (op0), &mode)
      && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
        {
          /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
             implemented.  */
          if (REG_P (SUBREG_REG (op0)))
            {
              op0 = SUBREG_REG (op0);
              op1 = gen_lowpart (inner_mode, op1);
            }
        }
      else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
               && (nonzero_bits (SUBREG_REG (op0), inner_mode)
                   & ~GET_MODE_MASK (mode)) == 0)
        {
          tem = gen_lowpart (inner_mode, op1);

          if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
            op0 = SUBREG_REG (op0), op1 = tem;
        }
    }

  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
        tmode = tmode_iter.require ();
        if (!HWI_COMPUTABLE_MODE_P (tmode))
          break;
        if (have_insn_for (COMPARE, tmode))
          {
            int zero_extended;

            /* If this is a test for negative, we can make an explicit
               test of the sign bit.  Test this first so we can use
               a paradoxical subreg to extend OP0.  */

            if (op1 == const0_rtx && (code == LT || code == GE)
                && HWI_COMPUTABLE_MODE_P (mode))
              {
                unsigned HOST_WIDE_INT sign
                  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
                op0 = simplify_gen_binary (AND, tmode,
                                           gen_lowpart (tmode, op0),
                                           gen_int_mode (sign, tmode));
                code = (code == LT) ? NE : EQ;
                break;
              }

            /* If the only nonzero bits in OP0 and OP1 are those in the
               narrower mode and this is an equality or unsigned comparison,
               we can use the wider mode.  Similarly for sign-extended
               values, in which case it is true for all comparisons.  */
            zero_extended = ((code == EQ || code == NE
                              || code == GEU || code == GTU
                              || code == LEU || code == LTU)
                             && (nonzero_bits (op0, tmode)
                                 & ~GET_MODE_MASK (mode)) == 0
                             && ((CONST_INT_P (op1)
                                  || (nonzero_bits (op1, tmode)
                                      & ~GET_MODE_MASK (mode)) == 0)));

            if (zero_extended
                || ((num_sign_bit_copies (op0, tmode)
                     > (unsigned int) (GET_MODE_PRECISION (tmode)
                                       - GET_MODE_PRECISION (mode)))
                    && (num_sign_bit_copies (op1, tmode)
                        > (unsigned int) (GET_MODE_PRECISION (tmode)
                                          - GET_MODE_PRECISION (mode)))))
              {
                /* If OP0 is an AND and we don't have an AND in MODE either,
                   make a new AND in the proper mode.  */
                if (GET_CODE (op0) == AND
                    && !have_insn_for (AND, mode))
                  op0 = simplify_gen_binary (AND, tmode,
                                             gen_lowpart (tmode,
                                                          XEXP (op0, 0)),
                                             gen_lowpart (tmode,
                                                          XEXP (op0, 1)));

                if (zero_extended)
                  {
                    op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
                                              op0, mode);
                    op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
                                              op1, mode);
                  }
                else
                  {
                    op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
                                              op0, mode);
                    op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
                                              op1, mode);
                  }
                break;
              }
          }
      }

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
        return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return 2 + 2 * count_rtxs (x0)
               + count_rtxs (x == XEXP (x1, 0)
                             ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return 2 + 2 * count_rtxs (x1)
               + count_rtxs (x == XEXP (x0, 0)
                             ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpression we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
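/* A sketch of the sharing case handled above (hypothetical RTL, register
   numbers invented): for

       (plus:SI (mult:SI (reg:SI 100) (reg:SI 101))
		(mult:SI (reg:SI 100) (reg:SI 101)))

   where both operands of the PLUS are the same rtx object, the walk marks
   the registers below one MULT and skips the other, since the two operands
   are pointer-identical and would yield the same ticks.  */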
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.  */

      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
	rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}
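/* Example of the self-reference substitution above (hypothetical RTL,
   register numbers invented): when recording the effect of "x++", i.e.

       (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1)))

   the stored value mentions reg 100 itself, so the previously recorded
   value of reg 100 is substituted in; if nothing reliable is known, the
   occurrence becomes (clobber (const_int 0)) rather than a cycle.  */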
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle SUBREG in
	 some cases.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      gen_lowpart (GET_MODE (dest),
					   SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (!HWI_COMPUTABLE_MODE_P (mode))
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno
	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
	{
	  links = links->next;
	  continue;
	}

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
	{
	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
	}

      if (REG_P (SET_SRC (set)))
	{
	  regno = REGNO (SET_SRC (set));
	  links = LOG_LINKS (insn);
	}
      else
	break;
    }
}
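/* The SUBREGs handled above look like (hypothetical RTL, register number
   invented)

       (subreg/s/u:QI (reg:SI 100) 0)

   where the promoted flags record that reg 100 holds a QImode value
   zero-extended to SImode.  Knowing that, the nonzero bits recorded for
   reg 100 can be narrowed to GET_MODE_MASK (QImode), i.e. 0xff.  */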
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (!partial_subreg_p (mode, truncated))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
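/* For instance (hypothetical RTL, register number invented): if reg 100
   was last accessed as (subreg:HI (reg:SI 100) 0) and that truncation was
   recorded, a later

       (truncate:HI (reg:SI 100))

   can be replaced by the cheaper (subreg:HI (reg:SI 100) 0), because the
   value is already known to behave as if truncated to HImode.  */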
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
	return true;

      truncated_mode = GET_MODE (x);
      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
	return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turn TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
	switch (format[i])
	  {
	  case 'e':
	    check_promoted_subreg (insn, XEXP (x, i));
	    break;
	  case 'V':
	  case 'E':
	    if (XVEC (x, i) != 0)
	      for (j = 0; j < XVECLEN (x, i); j++)
		check_promoted_subreg (insn, XVECEXP (x, i, j));
	    break;
	  }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
	{
	  reg_stat_type *rsp = &reg_stat[j];
	  if (rsp->last_set_invalid
	      /* If this is a pseudo-register that was only set once and not
		 live at the beginning of the function, it is always valid.  */
	      || (! (regno >= FIRST_PSEUDO_REGISTER
		     && regno < reg_n_sets_max
		     && REG_N_SETS (regno) == 1
		     && (!REGNO_REG_SET_P
			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
			  regno)))
		  && rsp->last_set_label > tick))
	    {
	      if (replace)
		*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
	      return replace;
	    }
	}

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  /* Check for identical subexpressions.  If x contains
	     identical subexpression we only have to traverse one of
	     them.  */
	  if (i == 1 && ARITHMETIC_P (x))
	    {
	      /* Note that at this point x0 has already been checked
		 and found valid.  */
	      rtx x0 = XEXP (x, 0);
	      rtx x1 = XEXP (x, 1);

	      /* If x0 and x1 are identical then x is also valid.  */
	      if (x0 == x1)
		return 1;

	      /* If x1 is identical to a subexpression of x0 then
		 while checking x0, x1 has already been checked.  Thus
		 it is valid, and so is x.  */
	      if (ARITHMETIC_P (x0)
		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
		return 1;

	      /* If x0 is identical to a subexpression of x1 then x is
		 valid iff the rest of x1 is valid.  */
	      if (ARITHMETIC_P (x1)
		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
		return
		  get_last_value_validate (&XEXP (x1,
						  x0 == XEXP (x1, 0) ? 1 : 0),
					   insn, tick, replace);
	    }

	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
				       replace) == 0)
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (get_last_value_validate (&XVECEXP (x, i, j),
				       insn, tick, replace) == 0)
	    return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
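/* The CLOBBER replacement performed above preserves partial knowledge.
   For instance (hypothetical RTL, register number invented), if the
   recorded value

       (ashiftrt:SI (reg:SI 100) (const_int 31))

   mentions a reg 100 that has since been invalidated, replacing it to get

       (ashiftrt:SI (clobber (const_int 0)) (const_int 31))

   still tells us the whole expression is -1 or 0, even though the source
   register is no longer known.  */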
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
	  && (regno < FIRST_PSEUDO_REGISTER
	      || regno >= reg_n_sets_max
	      || REG_N_SETS (regno) != 1
	      || REGNO_REG_SET_P
		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (GET_MODE_PRECISION (rsp->last_set_mode)
      < GET_MODE_PRECISION (GET_MODE (x)))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
	  return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
	    return 1;

	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
	  if (reg_dead_flag)
	    return reg_dead_flag == 1 ? 1 : 0;

	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
	    return 1;
	}

      if (insn == BB_HEAD (block))
	break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
	 address as used.  */
      if (MEM_P (XEXP (x, 0)))
	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* None of this applies to the stack, frame or arg pointers.  */
	  if (regno == STACK_POINTER_REGNUM
	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
		  && regno == HARD_FRAME_POINTER_REGNUM)
	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
	      || regno == FRAME_POINTER_REGNUM)
	    return;

	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
	}
      return;

    case SET:
      {
	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
	   the address.  */
	rtx testreg = SET_DEST (x);

	while (GET_CODE (testreg) == SUBREG
	       || GET_CODE (testreg) == ZERO_EXTRACT
	       || GET_CODE (testreg) == STRICT_LOW_PART)
	  testreg = XEXP (testreg, 0);

	if (MEM_P (testreg))
	  mark_used_regs_combine (XEXP (testreg, 0));

	mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  mark_used_regs_combine (XEXP (x, i));
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      mark_used_regs_combine (XVECEXP (x, i, j));
	  }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

static rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
	     rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* If we do not know where the register died, it may still die between
	 FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
      if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
	{
	  rtx_insn *insn = prev_real_insn (to_insn);
	  while (insn
		 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
		 && DF_INSN_LUID (insn) >= from_luid)
	    {
	      if (dead_or_set_regno_p (insn, regno))
		{
		  if (find_regno_note (insn, REG_DEAD, regno))
		    where_dead = insn;
		  break;
		}

	      insn = prev_real_insn (insn);
	    }
	}

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
	  && ! reg_referenced_p (x, maybe_kill_insn))
	return;

      if (where_dead
	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
	  && DF_INSN_LUID (where_dead) >= from_luid
	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
	{
	  rtx note = remove_death (regno, where_dead);

	  /* It is possible for the call above to return 0.  This can occur
	     when last_death points to I2 or I1 that we combined with.
	     In that case make a new note.

	     We must also check for the case where X is a hard register
	     and NOTE is a death note for a range of hard registers
	     including X.  In that case, we must put REG_DEAD notes for
	     the remaining registers in place of NOTE.  */

	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
	      && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
	    {
	      unsigned int deadregno = REGNO (XEXP (note, 0));
	      unsigned int deadend = END_REGNO (XEXP (note, 0));
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i;

	      for (i = deadregno; i < deadend; i++)
		if (i < regno || i >= ourend)
		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
	    }

	  /* If we didn't find any note, or if we found a REG_DEAD note that
	     covers only part of the given reg, and we have a multi-reg hard
	     register, then to be safe we must check for REG_DEAD notes
	     for each register other than the first.  They could have
	     their own REG_DEAD notes lying around.  */
	  else if ((note == 0
		    || (note != 0
			&& partial_subreg_p (GET_MODE (XEXP (note, 0)),
					     GET_MODE (x))))
		   && regno < FIRST_PSEUDO_REGISTER
		   && REG_NREGS (x) > 1)
	    {
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i, offset;
	      rtx oldnotes = 0;

	      if (note)
		offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
	      else
		offset = 1;

	      for (i = regno + offset; i < ourend; i++)
		move_deaths (regno_reg_rtx[i],
			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
	    }

	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
	    {
	      XEXP (note, 1) = *pnotes;
	      *pnotes = note;
	    }
	  else
	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
	}

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	 that accesses one word of a multi-word item, some
	 piece of every register in the expression is used by
	 this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
	  || GET_CODE (dest) == STRICT_LOW_PART
	  || (GET_CODE (dest) == SUBREG
	      && !read_modify_subreg_p (dest)))
	{
	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
	  return;
	}

      /* If this is some other SUBREG, we know it replaces the entire
	 value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
	dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
	 For a REG (the only other possibility), the entire value is
	 being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
		     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
			 to_insn, pnotes);
	}
      else if (fmt[i] == 'e')
	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
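/* A sketch of what this routine accomplishes (hypothetical insns,
   register numbers invented): suppose

       A: (set (reg:SI 100) (plus:SI (reg:SI 105) (const_int 1)))
	  ;; with a REG_DEAD note for reg 105
       B: (set (reg:SI 101) (mult:SI (reg:SI 100) (reg:SI 102)))

   are combined so that B computes (mult (plus (reg 105) (const_int 1))
   (reg 102)).  The last use of reg 105 is now in B, so its REG_DEAD note
   is collected into PNOTES for redistribution to B.  */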
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));
      else
	return 0;

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))
	return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
	return 1;

  return 0;
}
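/* For example (hypothetical RTL, register numbers invented), given the
   pattern

       (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4))
	    (reg:SI 101))

   reg 100 is a bit-field assignment target: only bits 4..11 are written,
   so the rest of reg 100 stays live through the insn.  */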
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;

	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;

	case REG_ARGS_SIZE:
	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
	     entire adjustment.  Assert i3 contains at least some adjust.  */
	  if (!noop_move_p (i3))
	    {
	      poly_int64 old_size, args_size = get_args_size (note);
	      /* fixup_args_size_notes looks at REG_NORETURN note,
		 so ensure the note is placed there first.  */
	      if (CALL_P (i3))
		{
		  rtx *np;
		  for (np = &next_note; *np; np = &XEXP (*np, 1))
		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
		      {
			rtx n = *np;
			*np = XEXP (n, 1);
			XEXP (n, 1) = REG_NOTES (i3);
			REG_NOTES (i3) = n;
			break;
		      }
		}
	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
	      gcc_assert (maybe_ne (old_size, args_size)
			  || (CALL_P (i3)
			      && !ACCUMULATE_OUTGOING_ARGS
			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
	    }
	  break;

	case REG_NORETURN:
	case REG_SETJMP:
	case REG_TM:
	case REG_CALL_DECL:
	case REG_CALL_NOCF_CHECK:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;

	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for registers which were both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }

	  /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
	     but we can't tell which at this point.  We must reset any
	     expectations we had about the value that was previously
	     stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
	     and, if appropriate, restore its previous value, but we
	     don't have enough information for that at this point.  */
	  else
	    {
	      record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

	      /* Otherwise, if this register is now referenced in i2
		 then the register used to be modified in one of the
		 original insns.  If it was i3 (say, in an unused
		 parallel), it's now completely gone, so the note can
		 be discarded.  But if it was modified in i2, i1 or i0
		 and we still reference it in i2, then we're
		 referencing the previous value, and since the
		 register was modified and REG_UNUSED, we know that
		 the previous value is now dead.  So, if we only
		 reference the register in i2, we change the note to
		 REG_DEAD, to reflect the previous value.  However, if
		 we're also setting or clobbering the register as
		 scratch, we know (because the register was not
		 referenced in i3) that it's unused, just as it was
		 unused before, and we place the note in i2.  */
	      if (from_insn != i3 && i2 && INSN_P (i2)
		  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		{
		  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
		    PUT_REG_NOTE_KIND (note, REG_DEAD);
		  if (! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i2, REG_NOTE_KIND (note),
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i2, REG_NOTE_KIND (note),
					  XEXP (note, 0))))
		    place = i2;
		}
	    }
	  break;

	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;

	case REG_INC:
	  /* These notes say something about how a register is used.  They must
	     be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;

	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;

	      place2 = 0;
	    }
	  break;

	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;

	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */

	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem_insn = from_insn;
	  else
	    {
	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
		{
		  /* If the new I2 sets the same register that is marked
		     dead in the note, we do not in general know where to
		     put the note.  One important case we _can_ handle is
		     when the note comes from I3.  */
		  if (from_insn == i3)
		    place = i3;
		  else
		    break;
		}
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;
	      tem_insn = i3;
	    }

	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem_insn = PREV_INSN (tem_insn); place == 0;
		   tem_insn = PREV_INSN (tem_insn))
		{
		  if (!NONDEBUG_INSN_P (tem_insn))
		    {
		      if (tem_insn == BB_HEAD (bb))
			break;
		      continue;
		    }

		  /* If the register is being set at TEM_INSN, see if that is all
		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
		    {
		      rtx set = single_set (tem_insn);
		      rtx inner_dest = 0;
		      rtx_insn *cc0_setter = NULL;

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 CC0 targets must be careful to maintain setter/user
			 pairs.  If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest)
			  && (!HAVE_cc0
			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
			{
			  /* Move the notes and links of TEM_INSN elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem_insn);

			  PATTERN (tem_insn) = pc_rtx;
			  REG_NOTES (tem_insn) = NULL;

			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem_insn));

			  unsigned int regno = REGNO (XEXP (note, 0));
			  reg_stat_type *rsp = &reg_stat[regno];
			  if (rsp->last_set == tem_insn)
			    record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

			  SET_INSN_DELETED (tem_insn);
			  if (tem_insn == i2)
			    i2 = NULL;

			  /* Delete the setter too.  */
			  if (cc0_setter)
			    {
			      PATTERN (cc0_setter) = pc_rtx;
			      old_notes = REG_NOTES (cc0_setter);
			      REG_NOTES (cc0_setter) = NULL;

			      distribute_notes (old_notes, cc0_setter,
						cc0_setter, NULL,
						NULL_RTX, NULL_RTX, NULL_RTX);
			      distribute_links (LOG_LINKS (cc0_setter));

			      SET_INSN_DELETED (cc0_setter);
			      if (cc0_setter == i2)
				i2 = NULL;
			    }
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem_insn, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem_insn;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
			   || (CALL_P (tem_insn)
			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
		    {
		      place = tem_insn;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  struct insn_link *links = LOG_LINKS (place);
			  LOG_LINKS (place) = NULL;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem_insn == BB_HEAD (bb))
		    break;
		}
	    }

	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not if it is totally replaced,
	     which is what `dead_or_set_p' checks, so also check for it being
	     set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = &reg_stat[regno];

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.]  */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;

	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */

	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
		{
		  unsigned int endregno = END_REGNO (XEXP (note, 0));
		  bool all_used = true;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, PATTERN (place))
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      {
			all_used = false;
			break;
		      }

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs (i, reg_raw_mode[i]))
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, PATTERN (place))
				   && ! find_regno_fusage (place, USE, i))
			    for (tem_insn = PREV_INSN (place); ;
				 tem_insn = PREV_INSN (tem_insn))
			      {
				if (!NONDEBUG_INSN_P (tem_insn))
				  {
				    if (tem_insn == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem_insn, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem_insn)))
				  {
				    add_reg_note (tem_insn, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;

	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;

	  /* Set added_notes_insn to the earliest insn we added a note to.  */
	  if (added_notes_insn == 0
	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
	    added_notes_insn = place;
	}

      if (place2)
	{
	  add_shallow_copy_of_reg_note (place2, note);

	  /* Set added_notes_insn to the earliest insn we added a note to.  */
	  if (added_notes_insn == 0
	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
	    added_notes_insn = place2;
	}
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx set, reg;
      rtx_insn *insn;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
	continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
	set = pat;
      else if (GET_CODE (pat) == PARALLEL)
	{
	  int i;
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      set = XVECEXP (pat, 0, i);
	      if (GET_CODE (set) != SET)
		continue;

	      reg = SET_DEST (set);
	      while (GET_CODE (reg) == ZERO_EXTRACT
		     || GET_CODE (reg) == STRICT_LOW_PART
		     || GET_CODE (reg) == SUBREG)
		reg = XEXP (reg, 0);

	      if (!REG_P (reg))
		continue;

	      if (REGNO (reg) == link->regno)
		break;
	    }
	  if (i == XVECLEN (pat, 0))
	    continue;
	}
      else
	continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART
	     || GET_CODE (reg) == SUBREG)
	reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  struct insn_link *link2;

	  FOR_EACH_LOG_LINK (link2, place)
	    if (link2->insn == link->insn && link2->regno == link->regno)
	      break;

	  if (link2 == NULL)
	    {
	      link->next = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
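/* A sketch of the search above (hypothetical insns, register numbers
   invented): if a link for reg 100 points at an insn I3 whose destination
   has changed, the scan walks forward from I3 looking for the first
   remaining user, e.g.

       I3:  (set (reg:SI 100) ...)
       I4:  (set (reg:SI 101) (plus:SI (reg:SI 100) (const_int 4)))

   Here the scan stops at I4 and records the link there, since I4 is now
   the first insn that uses the value set by I3.  */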
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
	  && !reg_mentioned_p (x, expr))
	return true;
    }
  return false;
}
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
	free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}

namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}