/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.  */
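
/* Illustrative sketch (example RTL, not from the sources): a typical
   two-insn combination.  Given

	insn A:  (set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4)))
	insn B:  (set (reg:SI 101) (ashift:SI (reg:SI 100) (const_int 2)))

   where reg 100 dies in B, substituting A's source into B gives

	(set (reg:SI 101) (ashift:SI (plus:SI (reg:SI 99) (const_int 4))
				     (const_int 2)))

   which is installed, and A deleted, only if it matches a pattern in
   the machine description and passes the cost check further below.
   The register numbers and modes here are made up for the example.  */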
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "print-rtl.h"
#include "function-abi.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn			*last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn			*last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				other expression

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx				last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int				last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int				last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bits copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT	last_set_nonzero_bits;
  char				last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode)	last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char				last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char			sign_bit_copies;

  unsigned HOST_WIDE_INT	nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int				truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;
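
/* Illustration of how this record is consumed (values assumed): if
   reg_stat[100].nonzero_bits == 0xff, pseudo 100 is known to hold only
   0..255, so a later (and:SI (reg:SI 100) (const_int 255)) is redundant
   and can be simplified to reg 100 itself; if sign_bit_copies of a
   32-bit register is 25, its top 25 bits all equal the sign bit, so a
   sign-extension from its low byte can be dropped.  */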
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;
/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;
/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;
/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
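
/* Usage sketch (hypothetical dump loop, assuming a populated table):

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  fprintf (dump_file, "reg %u set by insn %d\n",
		   link->regno, INSN_UID (link->insn));

   walks every (register, defining insn) pair that feeds INSN.  */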
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
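
/* Sketch of the undo discipline implied by these structures: each
   do_SUBST_* call below pushes one struct undo on undobuf.undos
   recording where the change was made and the old contents.  On
   failure, undo_all walks the list restoring *where from old_contents;
   on success, undo_commit moves the entries to undobuf.frees for
   reuse.  */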
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs, true);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically a REG.
   It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& !read_modify_subreg_p (SET_DEST (x))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		/* Duplicate usage.  */
		return NULL;
	    }
	}
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
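
/* Example (hypothetical pattern variable): to tentatively replace the
   source of a SET during simplification, one writes

	SUBST (SET_SRC (pat), new_src);

   which records the old SET_SRC in undobuf before storing NEW_SRC, so
   undo_all can restore PAT if the combination is rejected.  */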
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;

  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
				  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old_cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
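
/* Worked example with made-up costs: if I2 costs 4 and I3 costs 8,
   old_cost = 12.  With no NEWI2PAT, a replacement NEWPAT costing 10
   yields new_cost = 10 <= 12 and the combination is allowed, whereas a
   NEWPAT costing 16 is rejected.  A cost of 0 means "unknown" and makes
   the comparison a no-op instead of a rejection.  */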
/* Delete any insns that copy a register to itself.
   Return true if the CFG was changed.  */

static bool
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  bool edges_deleted = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));

	      edges_deleted |= delete_insn_and_edges (insn);
	    }
	}
    }

  return edges_deleted;
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}
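
/* For a block like (pseudo-RTL, illustrative only)

	insn 10:  (set (reg 100) (...))
	insn 11:  (set (reg 101) (... (reg 100) ...))

   where insn 11 is the next use of reg 100, the walk above leaves
   LOG_LINKS (insn 11) holding one entry {insn 10, regno 100}, which is
   exactly the shape try_combine consumes.  */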
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the CFG was changed (e.g. if the combiner has
   turned an indirect jump instruction into a direct jump).  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs, true);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (insn, set_nonzero_bits_and_sign_copies, insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_cost of this instruction.  */
	    INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;
1212 label_tick
= label_tick_ebb_start
= 1;
1214 setup_incoming_promotions (first
);
1215 last_bb
= ENTRY_BLOCK_PTR_FOR_FN (cfun
);
1216 int max_combine
= param_max_combine_insns
;
1218 FOR_EACH_BB_FN (this_basic_block
, cfun
)
1220 rtx_insn
*last_combined_insn
= NULL
;
1222 /* Ignore instruction combination in basic blocks that are going to
1223 be removed as unreachable anyway. See PR82386. */
1224 if (EDGE_COUNT (this_basic_block
->preds
) == 0)
1227 optimize_this_for_speed_p
= optimize_bb_for_speed_p (this_basic_block
);
1232 if (!single_pred_p (this_basic_block
)
1233 || single_pred (this_basic_block
) != last_bb
)
1234 label_tick_ebb_start
= label_tick
;
1235 last_bb
= this_basic_block
;
1237 rtl_profile_for_bb (this_basic_block
);
1238 for (insn
= BB_HEAD (this_basic_block
);
1239 insn
!= NEXT_INSN (BB_END (this_basic_block
));
1240 insn
= next
? next
: NEXT_INSN (insn
))
1243 if (!NONDEBUG_INSN_P (insn
))
1246 while (last_combined_insn
1247 && (!NONDEBUG_INSN_P (last_combined_insn
)
1248 || last_combined_insn
->deleted ()))
1249 last_combined_insn
= PREV_INSN (last_combined_insn
);
1250 if (last_combined_insn
== NULL_RTX
1251 || BLOCK_FOR_INSN (last_combined_insn
) != this_basic_block
1252 || DF_INSN_LUID (last_combined_insn
) <= DF_INSN_LUID (insn
))
1253 last_combined_insn
= insn
;
1255 /* See if we know about function return values before this
1256 insn based upon SUBREG flags. */
1257 check_promoted_subreg (insn
, PATTERN (insn
));
1259 /* See if we can find hardregs and subreg of pseudos in
1260 narrower modes. This could help turning TRUNCATEs
1262 note_uses (&PATTERN (insn
), record_truncated_values
, NULL
);
1264 /* Try this insn with each insn it links back to. */
1266 FOR_EACH_LOG_LINK (links
, insn
)
1267 if ((next
= try_combine (insn
, links
->insn
, NULL
,
1268 NULL
, &new_direct_jump_p
,
1269 last_combined_insn
)) != 0)
1271 statistics_counter_event (cfun
, "two-insn combine", 1);
1275 /* Try each sequence of three linked insns ending with this one. */
1277 if (max_combine
>= 3)
1278 FOR_EACH_LOG_LINK (links
, insn
)
1280 rtx_insn
*link
= links
->insn
;
1282 /* If the linked insn has been replaced by a note, then there
1283 is no point in pursuing this chain any further. */
1287 FOR_EACH_LOG_LINK (nextlinks
, link
)
1288 if ((next
= try_combine (insn
, link
, nextlinks
->insn
,
1289 NULL
, &new_direct_jump_p
,
1290 last_combined_insn
)) != 0)
1292 statistics_counter_event (cfun
, "three-insn combine", 1);
1297 /* Try combining an insn with two different insns whose results it
1299 if (max_combine
>= 3)
1300 FOR_EACH_LOG_LINK (links
, insn
)
1301 for (nextlinks
= links
->next
; nextlinks
;
1302 nextlinks
= nextlinks
->next
)
1303 if ((next
= try_combine (insn
, links
->insn
,
1304 nextlinks
->insn
, NULL
,
1306 last_combined_insn
)) != 0)
1309 statistics_counter_event (cfun
, "three-insn combine", 1);
1313 /* Try four-instruction combinations. */
1314 if (max_combine
>= 4)
1315 FOR_EACH_LOG_LINK (links
, insn
)
1317 struct insn_link
*next1
;
1318 rtx_insn
*link
= links
->insn
;
1320 /* If the linked insn has been replaced by a note, then there
1321 is no point in pursuing this chain any further. */
1325 FOR_EACH_LOG_LINK (next1
, link
)
1327 rtx_insn
*link1
= next1
->insn
;
1330 /* I0 -> I1 -> I2 -> I3. */
1331 FOR_EACH_LOG_LINK (nextlinks
, link1
)
1332 if ((next
= try_combine (insn
, link
, link1
,
1335 last_combined_insn
)) != 0)
1337 statistics_counter_event (cfun
, "four-insn combine", 1);
1340 /* I0, I1 -> I2, I2 -> I3. */
1341 for (nextlinks
= next1
->next
; nextlinks
;
1342 nextlinks
= nextlinks
->next
)
1343 if ((next
= try_combine (insn
, link
, link1
,
1346 last_combined_insn
)) != 0)
1348 statistics_counter_event (cfun
, "four-insn combine", 1);
1353 for (next1
= links
->next
; next1
; next1
= next1
->next
)
1355 rtx_insn
*link1
= next1
->insn
;
1358 /* I0 -> I2; I1, I2 -> I3. */
1359 FOR_EACH_LOG_LINK (nextlinks
, link
)
1360 if ((next
= try_combine (insn
, link
, link1
,
1363 last_combined_insn
)) != 0)
1365 statistics_counter_event (cfun
, "four-insn combine", 1);
1368 /* I0 -> I1; I1, I2 -> I3. */
1369 FOR_EACH_LOG_LINK (nextlinks
, link1
)
1370 if ((next
= try_combine (insn
, link
, link1
,
1373 last_combined_insn
)) != 0)
1375 statistics_counter_event (cfun
, "four-insn combine", 1);
1381 /* Try this insn with each REG_EQUAL note it links back to. */
1382 FOR_EACH_LOG_LINK (links
, insn
)
1385 rtx_insn
*temp
= links
->insn
;
1386 if ((set
= single_set (temp
)) != 0
1387 && (note
= find_reg_equal_equiv_note (temp
)) != 0
1388 && (note
= XEXP (note
, 0), GET_CODE (note
)) != EXPR_LIST
1389 && ! side_effects_p (SET_SRC (set
))
1390 /* Avoid using a register that may already been marked
1391 dead by an earlier instruction. */
1392 && ! unmentioned_reg_p (note
, SET_SRC (set
))
1393 && (GET_MODE (note
) == VOIDmode
1394 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set
)))
1395 : (GET_MODE (SET_DEST (set
)) == GET_MODE (note
)
1396 && (GET_CODE (SET_DEST (set
)) != ZERO_EXTRACT
1397 || (GET_MODE (XEXP (SET_DEST (set
), 0))
1398 == GET_MODE (note
))))))
1400 /* Temporarily replace the set's source with the
1401 contents of the REG_EQUAL note. The insn will
1402 be deleted or recognized by try_combine. */
1403 rtx orig_src
= SET_SRC (set
);
1404 rtx orig_dest
= SET_DEST (set
);
1405 if (GET_CODE (SET_DEST (set
)) == ZERO_EXTRACT
)
1406 SET_DEST (set
) = XEXP (SET_DEST (set
), 0);
1407 SET_SRC (set
) = note
;
1409 i2mod_old_rhs
= copy_rtx (orig_src
);
1410 i2mod_new_rhs
= copy_rtx (note
);
1411 next
= try_combine (insn
, i2mod
, NULL
, NULL
,
1413 last_combined_insn
);
1417 statistics_counter_event (cfun
, "insn-with-note combine", 1);
1420 SET_SRC (set
) = orig_src
;
1421 SET_DEST (set
) = orig_dest
;
1426 record_dead_and_set_regs (insn
);
1433 default_rtl_profile ();
1435 new_direct_jump_p
|= purge_all_dead_edges ();
1436 new_direct_jump_p
|= delete_noop_moves ();
1439 obstack_free (&insn_link_obstack
, NULL
);
1440 free (uid_log_links
);
1441 free (uid_insn_cost
);
1442 reg_stat
.release ();
1445 struct undo
*undo
, *next
;
1446 for (undo
= undobuf
.frees
; undo
; undo
= next
)
1454 total_attempts
+= combine_attempts
;
1455 total_merges
+= combine_merges
;
1456 total_extras
+= combine_extras
;
1457 total_successes
+= combine_successes
;
1459 nonzero_sign_valid
= 0;
1460 rtl_hooks
= general_rtl_hooks
;
1462 /* Make recognizer allow volatile MEMs again. */
1465 return new_direct_jump_p
;
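
/* Summary of the attempt order implemented above (a reading aid, not a
   separate mechanism): for each insn I3 the loop tries two-insn
   combinations (I2 -> I3) first, then both three-insn shapes
   (I1 -> I2 -> I3 and I1, I2 -> I3) when param_max_combine_insns >= 3,
   then the four four-insn shapes when it is >= 4, and finally retries
   the two-insn case with I2's SET_SRC temporarily replaced by its
   REG_EQUAL note.  Any success jumps to "retry" and rescanning resumes
   at the insn try_combine returned.  */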
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local
	= cgraph_node::local_info_node (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.cc.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
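
/* Numeric illustration (modes and word size assumed, not from the
   sources): with int_mode == HImode (16 bits) and prec == 32, a
   CONST_INT of 0x8000 has its HImode sign bit set, so the result is
   GEN_INT (0x8000 | ~0xffff), i.e. 0x...ffff8000 -- the same value a
   sign-extending machine would materialize for that short immediate.  */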
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      machine_mode mode = GET_MODE (x);
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      bits = nonzero_bits (src, mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (mode);
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	}
    }
}
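
/* Illustrative effect (register number assumed): if pseudo 100 holds
   QImode data zero-extended into an SImode register everywhere it is
   set, each SET reaches update_rsp_from_reg_equal above and
   reg_stat[100].nonzero_bits converges to 0xff; a CLOBBER or a partial
   store instead forces the conservative GET_MODE_MASK (mode) /
   sign_bit_copies == 1 state.  */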
/* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
   optionally insns that were previously combined into I3 or that will be
   combined into the merger of INSN and I3.  The order is PRED, PRED2,
   INSN, SUCC, SUCC2, I3.

   Return 0 if the combination is not allowed for any reason.

   If the combination is allowed, *PDEST will be set to the single
   destination of INSN and *PSRC to the single source, and this function
   returns 1.  */

static int
can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
	       rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
	       rtx *pdest, rtx *psrc)
{
  int i;
  const_rtx set = 0;
  rtx src, dest;
  rtx_insn *p;
  rtx link;
  bool all_adjacent = true;
  int (*is_volatile_p) (const_rtx);

  if (succ)
    {
      if (succ2)
	{
	  if (next_active_insn (succ2) != i3)
	    all_adjacent = false;
	  if (next_active_insn (succ) != succ2)
	    all_adjacent = false;
	}
      else if (next_active_insn (succ) != i3)
	all_adjacent = false;
      if (next_active_insn (insn) != succ)
	all_adjacent = false;
    }
  else if (next_active_insn (insn) != i3)
    all_adjacent = false;

  /* Can combine only if previous insn is a SET of a REG or a SUBREG,
     or a PARALLEL consisting of such a SET and CLOBBERs.

     If INSN has CLOBBER parallel parts, ignore them for our processing.
     By definition, these happen during the execution of the insn.  When it
     is merged with another insn, all bets are off.  If they are, in fact,
     needed and aren't also supplied in I3, they may be added by
     recog_for_combine.  Otherwise, it won't match.

     We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
     note.

     Get the source and destination of INSN.  If more than one, can't
     combine.  */

  if (GET_CODE (PATTERN (insn)) == SET)
    set = PATTERN (insn);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
    {
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	{
	  rtx elt = XVECEXP (PATTERN (insn), 0, i);

	  switch (GET_CODE (elt))
	    {
	    /* This is important to combine floating point insns
	       for the SH4 port.  */
	    case USE:
	      /* Combining an isolated USE doesn't make sense.
		 We depend here on combinable_i3pat to reject them.  */
	      /* The code below this loop only verifies that the inputs of
		 the SET in INSN do not change.  We call reg_set_between_p
		 to verify that the REG in the USE does not change between
		 I3 and INSN.
		 If the USE in INSN was for a pseudo register, the matching
		 insn pattern will likely match any register; combining this
		 with any other USE would only be safe if we knew that the
		 used registers have identical values, or if there was
		 something to tell them apart, e.g. different modes.  For
		 now, we forgo such complicated tests and simply disallow
		 combining of USES of pseudo registers with any other USE.  */
	      if (REG_P (XEXP (elt, 0))
		  && GET_CODE (PATTERN (i3)) == PARALLEL)
		{
		  rtx i3pat = PATTERN (i3);
		  int i = XVECLEN (i3pat, 0) - 1;
		  unsigned int regno = REGNO (XEXP (elt, 0));

		  do
		    {
		      rtx i3elt = XVECEXP (i3pat, 0, i);

		      if (GET_CODE (i3elt) == USE
			  && REG_P (XEXP (i3elt, 0))
			  && (REGNO (XEXP (i3elt, 0)) == regno
			      ? reg_set_between_p (XEXP (elt, 0),
						   PREV_INSN (insn), i3)
			      : regno >= FIRST_PSEUDO_REGISTER))
			return 0;
		    }
		  while (--i >= 0);
		}
	      break;

	      /* We can ignore CLOBBERs.  */
	    case CLOBBER:
	      break;

	    case SET:
	      /* Ignore SETs whose result isn't used but not those that
		 have side-effects.  */
	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
		  && insn_nothrow_p (insn)
		  && !side_effects_p (elt))
		break;

	      /* If we have already found a SET, this is a second one and
		 so we cannot combine with this insn.  */
	      if (set)
		return 0;

	      set = elt;
	      break;

	    default:
	      /* Anything else means we can't combine.  */
	      return 0;
	    }
	}

      if (set == 0
	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
	     so don't do anything with it.  */
	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
	return 0;
    }
  else
    return 0;

  /* The simplification in expand_field_assignment may call back to
     get_last_value, so set safe guard here.  */
  subst_low_luid = DF_INSN_LUID (insn);

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);

  /* Do not eliminate user-specified register if it is in an
     asm input because we may break the register asm usage defined
     in GCC manual if we are allowed to do so.
     Be aware that this may cover more cases than we expect but this
     should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;

  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
	  && ((succ2
	       && (reg_used_between_p (dest, succ2, i3)
		   || reg_used_between_p (dest, succ, succ2)))
	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
	      || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
	      || (succ
		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
		     that case SUCC is not in the insn stream, so use SUCC2
		     instead for this test.  */
		  && reg_used_between_p (dest, insn,
					 succ2
					 && INSN_UID (succ) == INSN_UID (succ2)
					 ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && modified_between_p (src, insn, i3))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;

  /* DEST must be a REG.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.  */
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && !targetm.hard_regno_mode_ok (REGNO (src),
						  GET_MODE (src)))))
	return 0;
    }
  else
    return 0;

  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
	{
	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	  if (!REG_P (reg)
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
	      return 0;
	}

  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
	return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
	return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;

  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
    ? volatile_refs_p
    : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;

  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump if using LRA; if it were one
     and the incremented register were spilled, we would lose.
     Reload handles this correctly.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && ((JUMP_P (i3) && targetm.lra_p ())
	      || reg_used_between_p (XEXP (link, 0), insn, i3)
	      || (pred != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred
)))
2034 || (pred2
!= NULL_RTX
2035 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (pred2
)))
2036 || (succ
!= NULL_RTX
2037 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (succ
)))
2038 || (succ2
!= NULL_RTX
2039 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (succ2
)))
2040 || reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i3
))))
2043 /* If we get here, we have passed all the tests and the combination is
2052 /* LOC is the location within I3 that contains its pattern or the component
2053 of a PARALLEL of the pattern. We validate that it is valid for combining.
2055 One problem is if I3 modifies its output, as opposed to replacing it
2056 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2057 doing so would produce an insn that is not equivalent to the original insns.
2061 (set (reg:DI 101) (reg:DI 100))
2062 (set (subreg:SI (reg:DI 101) 0) <foo>)
2064 This is NOT equivalent to:
2066 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2067 (set (reg:DI 101) (reg:DI 100))])
2069 Not only does this modify 100 (in which case it might still be valid
2070 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2072 We can also run into a problem if I2 sets a register that I1
2073 uses and I1 gets directly substituted into I3 (not via I2). In that
2074 case, we would be getting the wrong value of I2DEST into I3, so we
2075 must reject the combination. This case occurs when I2 and I1 both
2076 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2077 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2078 of a SET must prevent combination from occurring. The same situation
2079 can occur for I0, in which case I0_NOT_IN_SRC is set.
2081 Before doing the above check, we first try to expand a field assignment
2082 into a set of logical operations.
2084 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2085 we place a register that is both set and used within I3. If more than one
2086 such register is detected, we fail.
2088 Return 1 if the combination is valid, zero otherwise. */
2091 combinable_i3pat (rtx_insn
*i3
, rtx
*loc
, rtx i2dest
, rtx i1dest
, rtx i0dest
,
2092 int i1_not_in_src
, int i0_not_in_src
, rtx
*pi3dest_killed
)
2096 if (GET_CODE (x
) == SET
)
2099 rtx dest
= SET_DEST (set
);
2100 rtx src
= SET_SRC (set
);
2101 rtx inner_dest
= dest
;
2104 while (GET_CODE (inner_dest
) == STRICT_LOW_PART
2105 || GET_CODE (inner_dest
) == SUBREG
2106 || GET_CODE (inner_dest
) == ZERO_EXTRACT
)
2107 inner_dest
= XEXP (inner_dest
, 0);
2109 /* Check for the case where I3 modifies its output, as discussed
2110 above. We don't want to prevent pseudos from being combined
2111 into the address of a MEM, so only prevent the combination if
2112 i1 or i2 set the same MEM. */
2113 if ((inner_dest
!= dest
&&
2114 (!MEM_P (inner_dest
)
2115 || rtx_equal_p (i2dest
, inner_dest
)
2116 || (i1dest
&& rtx_equal_p (i1dest
, inner_dest
))
2117 || (i0dest
&& rtx_equal_p (i0dest
, inner_dest
)))
2118 && (reg_overlap_mentioned_p (i2dest
, inner_dest
)
2119 || (i1dest
&& reg_overlap_mentioned_p (i1dest
, inner_dest
))
2120 || (i0dest
&& reg_overlap_mentioned_p (i0dest
, inner_dest
))))
2122 /* This is the same test done in can_combine_p except we can't test
2123 all_adjacent; we don't have to, since this instruction will stay
2124 in place, thus we are not considering increasing the lifetime of
2127 Also, if this insn sets a function argument, combining it with
2128 something that might need a spill could clobber a previous
2129 function argument; the all_adjacent test in can_combine_p also
2130 checks this; here, we do a more specific test for this case. */
2132 || (REG_P (inner_dest
)
2133 && REGNO (inner_dest
) < FIRST_PSEUDO_REGISTER
2134 && !targetm
.hard_regno_mode_ok (REGNO (inner_dest
),
2135 GET_MODE (inner_dest
)))
2136 || (i1_not_in_src
&& reg_overlap_mentioned_p (i1dest
, src
))
2137 || (i0_not_in_src
&& reg_overlap_mentioned_p (i0dest
, src
)))
2140 /* If DEST is used in I3, it is being killed in this insn, so
2141 record that for later. We have to consider paradoxical
2142 subregs here, since they kill the whole register, but we
2143 ignore partial subregs, STRICT_LOW_PART, etc.
2144 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2145 STACK_POINTER_REGNUM, since these are always considered to be
2146 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2148 if (GET_CODE (subdest
) == SUBREG
&& !partial_subreg_p (subdest
))
2149 subdest
= SUBREG_REG (subdest
);
2152 && reg_referenced_p (subdest
, PATTERN (i3
))
2153 && REGNO (subdest
) != FRAME_POINTER_REGNUM
2154 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2155 || REGNO (subdest
) != HARD_FRAME_POINTER_REGNUM
)
2156 && (FRAME_POINTER_REGNUM
== ARG_POINTER_REGNUM
2157 || (REGNO (subdest
) != ARG_POINTER_REGNUM
2158 || ! fixed_regs
[REGNO (subdest
)]))
2159 && REGNO (subdest
) != STACK_POINTER_REGNUM
)
2161 if (*pi3dest_killed
)
2164 *pi3dest_killed
= subdest
;
2168 else if (GET_CODE (x
) == PARALLEL
)
2172 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
2173 if (! combinable_i3pat (i3
, &XVECEXP (x
, 0, i
), i2dest
, i1dest
, i0dest
,
2174 i1_not_in_src
, i0_not_in_src
, pi3dest_killed
))
2181 /* Return 1 if X is an arithmetic expression that contains a multiplication
2182 and division. We don't count multiplications by powers of two here. */
2185 contains_muldiv (rtx x
)
2187 switch (GET_CODE (x
))
2189 case MOD
: case DIV
: case UMOD
: case UDIV
:
2193 return ! (CONST_INT_P (XEXP (x
, 1))
2194 && pow2p_hwi (UINTVAL (XEXP (x
, 1))));
2197 return contains_muldiv (XEXP (x
, 0))
2198 || contains_muldiv (XEXP (x
, 1));
2201 return contains_muldiv (XEXP (x
, 0));
2207 /* Determine whether INSN can be used in a combination. Return nonzero if
2208 not. This is used in try_combine to detect early some cases where we
2209 can't perform combinations. */
2212 cant_combine_insn_p (rtx_insn
*insn
)
2217 /* If this isn't really an insn, we can't do anything.
2218 This can occur when flow deletes an insn that it has merged into an
2219 auto-increment address. */
2220 if (!NONDEBUG_INSN_P (insn
))
2223 /* Never combine loads and stores involving hard regs that are likely
2224 to be spilled. The register allocator can usually handle such
2225 reg-reg moves by tying. If we allow the combiner to make
2226 substitutions of likely-spilled regs, reload might die.
2227 As an exception, we allow combinations involving fixed regs; these are
2228 not available to the register allocator so there's no risk involved. */
2230 set
= single_set (insn
);
2233 src
= SET_SRC (set
);
2234 dest
= SET_DEST (set
);
2235 if (GET_CODE (src
) == SUBREG
)
2236 src
= SUBREG_REG (src
);
2237 if (GET_CODE (dest
) == SUBREG
)
2238 dest
= SUBREG_REG (dest
);
2239 if (REG_P (src
) && REG_P (dest
)
2240 && ((HARD_REGISTER_P (src
)
2241 && ! TEST_HARD_REG_BIT (fixed_reg_set
, REGNO (src
))
2242 #ifdef LEAF_REGISTERS
2243 && ! LEAF_REGISTERS
[REGNO (src
)])
2247 || (HARD_REGISTER_P (dest
)
2248 && ! TEST_HARD_REG_BIT (fixed_reg_set
, REGNO (dest
))
2249 && targetm
.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest
))))))
2255 struct likely_spilled_retval_info
2257 unsigned regno
, nregs
;
2261 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2262 hard registers that are known to be written to / clobbered in full. */
2264 likely_spilled_retval_1 (rtx x
, const_rtx set
, void *data
)
2266 struct likely_spilled_retval_info
*const info
=
2267 (struct likely_spilled_retval_info
*) data
;
2268 unsigned regno
, nregs
;
2271 if (!REG_P (XEXP (set
, 0)))
2274 if (regno
>= info
->regno
+ info
->nregs
)
2276 nregs
= REG_NREGS (x
);
2277 if (regno
+ nregs
<= info
->regno
)
2279 new_mask
= (2U << (nregs
- 1)) - 1;
2280 if (regno
< info
->regno
)
2281 new_mask
>>= info
->regno
- regno
;
2283 new_mask
<<= regno
- info
->regno
;
2284 info
->mask
&= ~new_mask
;
2287 /* Return nonzero iff part of the return value is live during INSN, and
2288 it is likely spilled. This can happen when more than one insn is needed
2289 to copy the return value, e.g. when we consider to combine into the
2290 second copy insn for a complex value. */
2293 likely_spilled_retval_p (rtx_insn
*insn
)
2295 rtx_insn
*use
= BB_END (this_basic_block
);
2298 unsigned regno
, nregs
;
2299 /* We assume here that no machine mode needs more than
2300 32 hard registers when the value overlaps with a register
2301 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2303 struct likely_spilled_retval_info info
;
2305 if (!NONJUMP_INSN_P (use
) || GET_CODE (PATTERN (use
)) != USE
|| insn
== use
)
2307 reg
= XEXP (PATTERN (use
), 0);
2308 if (!REG_P (reg
) || !targetm
.calls
.function_value_regno_p (REGNO (reg
)))
2310 regno
= REGNO (reg
);
2311 nregs
= REG_NREGS (reg
);
2314 mask
= (2U << (nregs
- 1)) - 1;
2316 /* Disregard parts of the return value that are set later. */
2320 for (p
= PREV_INSN (use
); info
.mask
&& p
!= insn
; p
= PREV_INSN (p
))
2322 note_stores (p
, likely_spilled_retval_1
, &info
);
2325 /* Check if any of the (probably) live return value registers is
2330 if ((mask
& 1 << nregs
)
2331 && targetm
.class_likely_spilled_p (REGNO_REG_CLASS (regno
+ nregs
)))
2337 /* Adjust INSN after we made a change to its destination.
2339 Changing the destination can invalidate notes that say something about
2340 the results of the insn and a LOG_LINK pointing to the insn. */
2343 adjust_for_new_dest (rtx_insn
*insn
)
2345 /* For notes, be conservative and simply remove them. */
2346 remove_reg_equal_equiv_notes (insn
, true);
2348 /* The new insn will have a destination that was previously the destination
2349 of an insn just above it. Call distribute_links to make a LOG_LINK from
2350 the next use of that destination. */
2352 rtx set
= single_set (insn
);
2355 rtx reg
= SET_DEST (set
);
2357 while (GET_CODE (reg
) == ZERO_EXTRACT
2358 || GET_CODE (reg
) == STRICT_LOW_PART
2359 || GET_CODE (reg
) == SUBREG
)
2360 reg
= XEXP (reg
, 0);
2361 gcc_assert (REG_P (reg
));
2363 distribute_links (alloc_insn_link (insn
, REGNO (reg
), NULL
));
2365 df_insn_rescan (insn
);
2368 /* Return TRUE if combine can reuse reg X in mode MODE.
2369 ADDED_SETS is nonzero if the original set is still required. */
2371 can_change_dest_mode (rtx x
, int added_sets
, machine_mode mode
)
2378 /* Don't change between modes with different underlying register sizes,
2379 since this could lead to invalid subregs. */
2380 if (maybe_ne (REGMODE_NATURAL_SIZE (mode
),
2381 REGMODE_NATURAL_SIZE (GET_MODE (x
))))
2385 /* Allow hard registers if the new mode is legal, and occupies no more
2386 registers than the old mode. */
2387 if (regno
< FIRST_PSEUDO_REGISTER
)
2388 return (targetm
.hard_regno_mode_ok (regno
, mode
)
2389 && REG_NREGS (x
) >= hard_regno_nregs (regno
, mode
));
2391 /* Or a pseudo that is only used once. */
2392 return (regno
< reg_n_sets_max
2393 && REG_N_SETS (regno
) == 1
2395 && !REG_USERVAR_P (x
));
2399 /* Check whether X, the destination of a set, refers to part of
2400 the register specified by REG. */
2403 reg_subword_p (rtx x
, rtx reg
)
2405 /* Check that reg is an integer mode register. */
2406 if (!REG_P (reg
) || GET_MODE_CLASS (GET_MODE (reg
)) != MODE_INT
)
2409 if (GET_CODE (x
) == STRICT_LOW_PART
2410 || GET_CODE (x
) == ZERO_EXTRACT
)
2413 return GET_CODE (x
) == SUBREG
2414 && !paradoxical_subreg_p (x
)
2415 && SUBREG_REG (x
) == reg
2416 && GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
;
2419 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2420 by an arbitrary number of CLOBBERs. */
2422 is_parallel_of_n_reg_sets (rtx pat
, int n
)
2424 if (GET_CODE (pat
) != PARALLEL
)
2427 int len
= XVECLEN (pat
, 0);
2432 for (i
= 0; i
< n
; i
++)
2433 if (GET_CODE (XVECEXP (pat
, 0, i
)) != SET
2434 || !REG_P (SET_DEST (XVECEXP (pat
, 0, i
))))
2436 for ( ; i
< len
; i
++)
2437 switch (GET_CODE (XVECEXP (pat
, 0, i
)))
2440 if (XEXP (XVECEXP (pat
, 0, i
), 0) == const0_rtx
)
2449 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2450 CLOBBERs), can be split into individual SETs in that order, without
2451 changing semantics. */
2453 can_split_parallel_of_n_reg_sets (rtx_insn
*insn
, int n
)
2455 if (!insn_nothrow_p (insn
))
2458 rtx pat
= PATTERN (insn
);
2461 for (i
= 0; i
< n
; i
++)
2463 if (side_effects_p (SET_SRC (XVECEXP (pat
, 0, i
))))
2466 rtx reg
= SET_DEST (XVECEXP (pat
, 0, i
));
2468 for (j
= i
+ 1; j
< n
; j
++)
2469 if (reg_referenced_p (reg
, XVECEXP (pat
, 0, j
)))
2476 /* Return whether X is just a single_set, with the source
2477 a general_operand. */
2479 is_just_move (rtx_insn
*x
)
2481 rtx set
= single_set (x
);
2485 return general_operand (SET_SRC (set
), VOIDmode
);
2488 /* Callback function to count autoincs. */
2491 count_auto_inc (rtx
, rtx
, rtx
, rtx
, rtx
, void *arg
)
2498 /* Try to combine the insns I0, I1 and I2 into I3.
2499 Here I0, I1 and I2 appear earlier than I3.
2500 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2503 If we are combining more than two insns and the resulting insn is not
2504 recognized, try splitting it into two insns. If that happens, I2 and I3
2505 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2506 Otherwise, I0, I1 and I2 are pseudo-deleted.
2508 Return 0 if the combination does not work. Then nothing is changed.
2509 If we did the combination, return the insn at which combine should
2512 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2513 new direct jump instruction.
2515 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2516 been I3 passed to an earlier try_combine within the same basic
2520 try_combine (rtx_insn
*i3
, rtx_insn
*i2
, rtx_insn
*i1
, rtx_insn
*i0
,
2521 int *new_direct_jump_p
, rtx_insn
*last_combined_insn
)
2523 /* New patterns for I3 and I2, respectively. */
2524 rtx newpat
, newi2pat
= 0;
2525 rtvec newpat_vec_with_clobbers
= 0;
2526 int substed_i2
= 0, substed_i1
= 0, substed_i0
= 0;
2527 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2529 int added_sets_0
, added_sets_1
, added_sets_2
;
2530 /* Total number of SETs to put into I3. */
2532 /* Nonzero if I2's or I1's body now appears in I3. */
2533 int i2_is_used
= 0, i1_is_used
= 0;
2534 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2535 int insn_code_number
, i2_code_number
= 0, other_code_number
= 0;
2536 /* Contains I3 if the destination of I3 is used in its source, which means
2537 that the old life of I3 is being killed. If that usage is placed into
2538 I2 and not in I3, a REG_DEAD note must be made. */
2539 rtx i3dest_killed
= 0;
2540 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2541 rtx i2dest
= 0, i2src
= 0, i1dest
= 0, i1src
= 0, i0dest
= 0, i0src
= 0;
2542 /* Copy of SET_SRC of I1 and I0, if needed. */
2543 rtx i1src_copy
= 0, i0src_copy
= 0, i0src_copy2
= 0;
2544 /* Set if I2DEST was reused as a scratch register. */
2545 bool i2scratch
= false;
2546 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2547 rtx i0pat
= 0, i1pat
= 0, i2pat
= 0;
2548 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2549 int i2dest_in_i2src
= 0, i1dest_in_i1src
= 0, i2dest_in_i1src
= 0;
2550 int i0dest_in_i0src
= 0, i1dest_in_i0src
= 0, i2dest_in_i0src
= 0;
2551 int i2dest_killed
= 0, i1dest_killed
= 0, i0dest_killed
= 0;
2552 int i1_feeds_i2_n
= 0, i0_feeds_i2_n
= 0, i0_feeds_i1_n
= 0;
2553 /* Notes that must be added to REG_NOTES in I3 and I2. */
2554 rtx new_i3_notes
, new_i2_notes
;
2555 /* Notes that we substituted I3 into I2 instead of the normal case. */
2556 int i3_subst_into_i2
= 0;
2557 /* Notes that I1, I2 or I3 is a MULT operation. */
2561 int changed_i3_dest
= 0;
2562 bool i2_was_move
= false, i3_was_move
= false;
2566 rtx_insn
*temp_insn
;
2568 struct insn_link
*link
;
2570 rtx new_other_notes
;
2572 scalar_int_mode dest_mode
, temp_mode
;
2574 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2576 if (i1
== i2
|| i0
== i2
|| (i0
&& i0
== i1
))
2579 /* Only try four-insn combinations when there's high likelihood of
2580 success. Look for simple insns, such as loads of constants or
2581 binary operations involving a constant. */
2589 if (!flag_expensive_optimizations
)
2592 for (i
= 0; i
< 4; i
++)
2594 rtx_insn
*insn
= i
== 0 ? i0
: i
== 1 ? i1
: i
== 2 ? i2
: i3
;
2595 rtx set
= single_set (insn
);
2599 src
= SET_SRC (set
);
2600 if (CONSTANT_P (src
))
2605 else if (BINARY_P (src
) && CONSTANT_P (XEXP (src
, 1)))
2607 else if (GET_CODE (src
) == ASHIFT
|| GET_CODE (src
) == ASHIFTRT
2608 || GET_CODE (src
) == LSHIFTRT
)
2612 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2613 are likely manipulating its value. Ideally we'll be able to combine
2614 all four insns into a bitfield insertion of some kind.
2616 Note the source in I0 might be inside a sign/zero extension and the
2617 memory modes in I0 and I3 might be different. So extract the address
2618 from the destination of I3 and search for it in the source of I0.
2620 In the event that there's a match but the source/dest do not actually
2621 refer to the same memory, the worst that happens is we try some
2622 combinations that we wouldn't have otherwise. */
2623 if ((set0
= single_set (i0
))
2624 /* Ensure the source of SET0 is a MEM, possibly buried inside
2626 && (GET_CODE (SET_SRC (set0
)) == MEM
2627 || ((GET_CODE (SET_SRC (set0
)) == ZERO_EXTEND
2628 || GET_CODE (SET_SRC (set0
)) == SIGN_EXTEND
)
2629 && GET_CODE (XEXP (SET_SRC (set0
), 0)) == MEM
))
2630 && (set3
= single_set (i3
))
2631 /* Ensure the destination of SET3 is a MEM. */
2632 && GET_CODE (SET_DEST (set3
)) == MEM
2633 /* Would it be better to extract the base address for the MEM
2634 in SET3 and look for that? I don't have cases where it matters
2635 but I could envision such cases. */
2636 && rtx_referenced_p (XEXP (SET_DEST (set3
), 0), SET_SRC (set0
)))
2639 if (ngood
< 2 && nshift
< 2)
2643 /* Exit early if one of the insns involved can't be used for
2646 || (i1
&& CALL_P (i1
))
2647 || (i0
&& CALL_P (i0
))
2648 || cant_combine_insn_p (i3
)
2649 || cant_combine_insn_p (i2
)
2650 || (i1
&& cant_combine_insn_p (i1
))
2651 || (i0
&& cant_combine_insn_p (i0
))
2652 || likely_spilled_retval_p (i3
))
2656 undobuf
.other_insn
= 0;
2658 /* Reset the hard register usage information. */
2659 CLEAR_HARD_REG_SET (newpat_used_regs
);
2661 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2664 fprintf (dump_file
, "\nTrying %d, %d, %d -> %d:\n",
2665 INSN_UID (i0
), INSN_UID (i1
), INSN_UID (i2
), INSN_UID (i3
));
2667 fprintf (dump_file
, "\nTrying %d, %d -> %d:\n",
2668 INSN_UID (i1
), INSN_UID (i2
), INSN_UID (i3
));
2670 fprintf (dump_file
, "\nTrying %d -> %d:\n",
2671 INSN_UID (i2
), INSN_UID (i3
));
2674 dump_insn_slim (dump_file
, i0
);
2676 dump_insn_slim (dump_file
, i1
);
2677 dump_insn_slim (dump_file
, i2
);
2678 dump_insn_slim (dump_file
, i3
);
2681 /* If multiple insns feed into one of I2 or I3, they can be in any
2682 order. To simplify the code below, reorder them in sequence. */
2683 if (i0
&& DF_INSN_LUID (i0
) > DF_INSN_LUID (i2
))
2685 if (i0
&& DF_INSN_LUID (i0
) > DF_INSN_LUID (i1
))
2687 if (i1
&& DF_INSN_LUID (i1
) > DF_INSN_LUID (i2
))
2690 added_links_insn
= 0;
2691 added_notes_insn
= 0;
2693 /* First check for one important special case that the code below will
2694 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2695 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2696 we may be able to replace that destination with the destination of I3.
2697 This occurs in the common code where we compute both a quotient and
2698 remainder into a structure, in which case we want to do the computation
2699 directly into the structure to avoid register-register copies.
2701 Note that this case handles both multiple sets in I2 and also cases
2702 where I2 has a number of CLOBBERs inside the PARALLEL.
2704 We make very conservative checks below and only try to handle the
2705 most common cases of this. For example, we only handle the case
2706 where I2 and I3 are adjacent to avoid making difficult register
2709 if (i1
== 0 && NONJUMP_INSN_P (i3
) && GET_CODE (PATTERN (i3
)) == SET
2710 && REG_P (SET_SRC (PATTERN (i3
)))
2711 && REGNO (SET_SRC (PATTERN (i3
))) >= FIRST_PSEUDO_REGISTER
2712 && find_reg_note (i3
, REG_DEAD
, SET_SRC (PATTERN (i3
)))
2713 && GET_CODE (PATTERN (i2
)) == PARALLEL
2714 && ! side_effects_p (SET_DEST (PATTERN (i3
)))
2715 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2716 below would need to check what is inside (and reg_overlap_mentioned_p
2717 doesn't support those codes anyway). Don't allow those destinations;
2718 the resulting insn isn't likely to be recognized anyway. */
2719 && GET_CODE (SET_DEST (PATTERN (i3
))) != ZERO_EXTRACT
2720 && GET_CODE (SET_DEST (PATTERN (i3
))) != STRICT_LOW_PART
2721 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3
)),
2722 SET_DEST (PATTERN (i3
)))
2723 && next_active_insn (i2
) == i3
)
2725 rtx p2
= PATTERN (i2
);
2727 /* Make sure that the destination of I3,
2728 which we are going to substitute into one output of I2,
2729 is not used within another output of I2. We must avoid making this:
2730 (parallel [(set (mem (reg 69)) ...)
2731 (set (reg 69) ...)])
2732 which is not well-defined as to order of actions.
2733 (Besides, reload can't handle output reloads for this.)
2735 The problem can also happen if the dest of I3 is a memory ref,
2736 if another dest in I2 is an indirect memory ref.
2738 Neither can this PARALLEL be an asm. We do not allow combining
2739 that usually (see can_combine_p), so do not here either. */
2741 for (i
= 0; ok
&& i
< XVECLEN (p2
, 0); i
++)
2743 if ((GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2744 || GET_CODE (XVECEXP (p2
, 0, i
)) == CLOBBER
)
2745 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3
)),
2746 SET_DEST (XVECEXP (p2
, 0, i
))))
2748 else if (GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2749 && GET_CODE (SET_SRC (XVECEXP (p2
, 0, i
))) == ASM_OPERANDS
)
2754 for (i
= 0; i
< XVECLEN (p2
, 0); i
++)
2755 if (GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2756 && SET_DEST (XVECEXP (p2
, 0, i
)) == SET_SRC (PATTERN (i3
)))
2761 subst_low_luid
= DF_INSN_LUID (i2
);
2763 added_sets_2
= added_sets_1
= added_sets_0
= 0;
2764 i2src
= SET_SRC (XVECEXP (p2
, 0, i
));
2765 i2dest
= SET_DEST (XVECEXP (p2
, 0, i
));
2766 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2768 /* Replace the dest in I2 with our dest and make the resulting
2769 insn the new pattern for I3. Then skip to where we validate
2770 the pattern. Everything was set up above. */
2771 SUBST (SET_DEST (XVECEXP (p2
, 0, i
)), SET_DEST (PATTERN (i3
)));
2773 i3_subst_into_i2
= 1;
2774 goto validate_replacement
;
2778 /* If I2 is setting a pseudo to a constant and I3 is setting some
2779 sub-part of it to another constant, merge them by making a new
2782 && (temp_expr
= single_set (i2
)) != 0
2783 && is_a
<scalar_int_mode
> (GET_MODE (SET_DEST (temp_expr
)), &temp_mode
)
2784 && CONST_SCALAR_INT_P (SET_SRC (temp_expr
))
2785 && GET_CODE (PATTERN (i3
)) == SET
2786 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3
)))
2787 && reg_subword_p (SET_DEST (PATTERN (i3
)), SET_DEST (temp_expr
)))
2789 rtx dest
= SET_DEST (PATTERN (i3
));
2790 rtx temp_dest
= SET_DEST (temp_expr
);
2794 if (GET_CODE (dest
) == ZERO_EXTRACT
)
2796 if (CONST_INT_P (XEXP (dest
, 1))
2797 && CONST_INT_P (XEXP (dest
, 2))
2798 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (dest
, 0)),
2801 width
= INTVAL (XEXP (dest
, 1));
2802 offset
= INTVAL (XEXP (dest
, 2));
2803 dest
= XEXP (dest
, 0);
2804 if (BITS_BIG_ENDIAN
)
2805 offset
= GET_MODE_PRECISION (dest_mode
) - width
- offset
;
2810 if (GET_CODE (dest
) == STRICT_LOW_PART
)
2811 dest
= XEXP (dest
, 0);
2812 if (is_a
<scalar_int_mode
> (GET_MODE (dest
), &dest_mode
))
2814 width
= GET_MODE_PRECISION (dest_mode
);
2821 /* If this is the low part, we're done. */
2822 if (subreg_lowpart_p (dest
))
2824 /* Handle the case where inner is twice the size of outer. */
2825 else if (GET_MODE_PRECISION (temp_mode
)
2826 == 2 * GET_MODE_PRECISION (dest_mode
))
2827 offset
+= GET_MODE_PRECISION (dest_mode
);
2828 /* Otherwise give up for now. */
2835 rtx inner
= SET_SRC (PATTERN (i3
));
2836 rtx outer
= SET_SRC (temp_expr
);
2838 wide_int o
= wi::insert (rtx_mode_t (outer
, temp_mode
),
2839 rtx_mode_t (inner
, dest_mode
),
2844 subst_low_luid
= DF_INSN_LUID (i2
);
2845 added_sets_2
= added_sets_1
= added_sets_0
= 0;
2847 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2849 /* Replace the source in I2 with the new constant and make the
2850 resulting insn the new pattern for I3. Then skip to where we
2851 validate the pattern. Everything was set up above. */
2852 SUBST (SET_SRC (temp_expr
),
2853 immed_wide_int_const (o
, temp_mode
));
2855 newpat
= PATTERN (i2
);
2857 /* The dest of I3 has been replaced with the dest of I2. */
2858 changed_i3_dest
= 1;
2859 goto validate_replacement
;
2863 /* If we have no I1 and I2 looks like:
2864 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2866 make up a dummy I1 that is
2869 (set (reg:CC X) (compare:CC Y (const_int 0)))
2871 (We can ignore any trailing CLOBBERs.)
2873 This undoes a previous combination and allows us to match a branch-and-
2877 && is_parallel_of_n_reg_sets (PATTERN (i2
), 2)
2878 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0))))
2880 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0))) == COMPARE
2881 && XEXP (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0)), 1) == const0_rtx
2882 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0)), 0),
2883 SET_SRC (XVECEXP (PATTERN (i2
), 0, 1)))
2884 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0)), i2
, i3
)
2885 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)), i2
, i3
))
2887 /* We make I1 with the same INSN_UID as I2. This gives it
2888 the same DF_INSN_LUID for value tracking. Our fake I1 will
2889 never appear in the insn stream so giving it the same INSN_UID
2890 as I2 will not cause a problem. */
2892 i1
= gen_rtx_INSN (VOIDmode
, NULL
, i2
, BLOCK_FOR_INSN (i2
),
2893 XVECEXP (PATTERN (i2
), 0, 1), INSN_LOCATION (i2
),
2895 INSN_UID (i1
) = INSN_UID (i2
);
2897 SUBST (PATTERN (i2
), XVECEXP (PATTERN (i2
), 0, 0));
2898 SUBST (XEXP (SET_SRC (PATTERN (i2
)), 0),
2899 SET_DEST (PATTERN (i1
)));
2900 unsigned int regno
= REGNO (SET_DEST (PATTERN (i1
)));
2901 SUBST_LINK (LOG_LINKS (i2
),
2902 alloc_insn_link (i1
, regno
, LOG_LINKS (i2
)));
2905 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2906 make those two SETs separate I1 and I2 insns, and make an I0 that is
2909 && is_parallel_of_n_reg_sets (PATTERN (i2
), 2)
2910 && can_split_parallel_of_n_reg_sets (i2
, 2)
2911 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0)), i2
, i3
)
2912 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)), i2
, i3
)
2913 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0)), i2
, i3
)
2914 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)), i2
, i3
))
2916 /* If there is no I1, there is no I0 either. */
2919 /* We make I1 with the same INSN_UID as I2. This gives it
2920 the same DF_INSN_LUID for value tracking. Our fake I1 will
2921 never appear in the insn stream so giving it the same INSN_UID
2922 as I2 will not cause a problem. */
2924 i1
= gen_rtx_INSN (VOIDmode
, NULL
, i2
, BLOCK_FOR_INSN (i2
),
2925 XVECEXP (PATTERN (i2
), 0, 0), INSN_LOCATION (i2
),
2927 INSN_UID (i1
) = INSN_UID (i2
);
2929 SUBST (PATTERN (i2
), XVECEXP (PATTERN (i2
), 0, 1));
2932 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
2933 if (!can_combine_p (i2
, i3
, i0
, i1
, NULL
, NULL
, &i2dest
, &i2src
))
2935 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2936 fprintf (dump_file
, "Can't combine i2 into i3\n");
2940 if (i1
&& !can_combine_p (i1
, i3
, i0
, NULL
, i2
, NULL
, &i1dest
, &i1src
))
2942 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2943 fprintf (dump_file
, "Can't combine i1 into i3\n");
2947 if (i0
&& !can_combine_p (i0
, i3
, NULL
, NULL
, i1
, i2
, &i0dest
, &i0src
))
2949 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2950 fprintf (dump_file
, "Can't combine i0 into i3\n");
2955 /* Record whether i2 and i3 are trivial moves. */
2956 i2_was_move
= is_just_move (i2
);
2957 i3_was_move
= is_just_move (i3
);
2959 /* Record whether I2DEST is used in I2SRC and similarly for the other
2960 cases. Knowing this will help in register status updating below. */
2961 i2dest_in_i2src
= reg_overlap_mentioned_p (i2dest
, i2src
);
2962 i1dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i1dest
, i1src
);
2963 i2dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i2dest
, i1src
);
2964 i0dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i0dest
, i0src
);
2965 i1dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i1dest
, i0src
);
2966 i2dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i2dest
, i0src
);
2967 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2968 i1dest_killed
= i1
&& dead_or_set_p (i1
, i1dest
);
2969 i0dest_killed
= i0
&& dead_or_set_p (i0
, i0dest
);
2971 /* For the earlier insns, determine which of the subsequent ones they
2973 i1_feeds_i2_n
= i1
&& insn_a_feeds_b (i1
, i2
);
2974 i0_feeds_i1_n
= i0
&& insn_a_feeds_b (i0
, i1
);
2975 i0_feeds_i2_n
= (i0
&& (!i0_feeds_i1_n
? insn_a_feeds_b (i0
, i2
)
2976 : (!reg_overlap_mentioned_p (i1dest
, i0dest
)
2977 && reg_overlap_mentioned_p (i0dest
, i2src
))));
2979 /* Ensure that I3's pattern can be the destination of combines. */
2980 if (! combinable_i3pat (i3
, &PATTERN (i3
), i2dest
, i1dest
, i0dest
,
2981 i1
&& i2dest_in_i1src
&& !i1_feeds_i2_n
,
2982 i0
&& ((i2dest_in_i0src
&& !i0_feeds_i2_n
)
2983 || (i1dest_in_i0src
&& !i0_feeds_i1_n
)),
2990 /* See if any of the insns is a MULT operation. Unless one is, we will
2991 reject a combination that is, since it must be slower. Be conservative
2993 if (GET_CODE (i2src
) == MULT
2994 || (i1
!= 0 && GET_CODE (i1src
) == MULT
)
2995 || (i0
!= 0 && GET_CODE (i0src
) == MULT
)
2996 || (GET_CODE (PATTERN (i3
)) == SET
2997 && GET_CODE (SET_SRC (PATTERN (i3
))) == MULT
))
3000 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3001 We used to do this EXCEPT in one case: I3 has a post-inc in an
3002 output operand. However, that exception can give rise to insns like
3004 which is a famous insn on the PDP-11 where the value of r3 used as the
3005 source was model-dependent. Avoid this sort of thing. */
3008 if (!(GET_CODE (PATTERN (i3
)) == SET
3009 && REG_P (SET_SRC (PATTERN (i3
)))
3010 && MEM_P (SET_DEST (PATTERN (i3
)))
3011 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3
)), 0)) == POST_INC
3012 || GET_CODE (XEXP (SET_DEST (PATTERN (i3
)), 0)) == POST_DEC
)))
3013 /* It's not the exception. */
3018 for (link
= REG_NOTES (i3
); link
; link
= XEXP (link
, 1))
3019 if (REG_NOTE_KIND (link
) == REG_INC
3020 && (reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i2
))
3022 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i1
)))))
3029 /* See if the SETs in I1 or I2 need to be kept around in the merged
3030 instruction: whenever the value set there is still needed past I3.
3031 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3033 For the SET in I1, we have two cases: if I1 and I2 independently feed
3034 into I3, the set in I1 needs to be kept around unless I1DEST dies
3035 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3036 in I1 needs to be kept around unless I1DEST dies or is set in either
3037 I2 or I3. The same considerations apply to I0. */
3039 added_sets_2
= !dead_or_set_p (i3
, i2dest
);
3042 added_sets_1
= !(dead_or_set_p (i3
, i1dest
)
3043 || (i1_feeds_i2_n
&& dead_or_set_p (i2
, i1dest
)));
3048 added_sets_0
= !(dead_or_set_p (i3
, i0dest
)
3049 || (i0_feeds_i1_n
&& dead_or_set_p (i1
, i0dest
))
3050 || ((i0_feeds_i2_n
|| (i0_feeds_i1_n
&& i1_feeds_i2_n
))
3051 && dead_or_set_p (i2
, i0dest
)));
3055 /* We are about to copy insns for the case where they need to be kept
3056 around. Check that they can be copied in the merged instruction. */
3058 if (targetm
.cannot_copy_insn_p
3059 && ((added_sets_2
&& targetm
.cannot_copy_insn_p (i2
))
3060 || (i1
&& added_sets_1
&& targetm
.cannot_copy_insn_p (i1
))
3061 || (i0
&& added_sets_0
&& targetm
.cannot_copy_insn_p (i0
))))
3067 /* We cannot safely duplicate volatile references in any case. */
3069 if ((added_sets_2
&& volatile_refs_p (PATTERN (i2
)))
3070 || (added_sets_1
&& volatile_refs_p (PATTERN (i1
)))
3071 || (added_sets_0
&& volatile_refs_p (PATTERN (i0
))))
3077 /* Count how many auto_inc expressions there were in the original insns;
3078 we need to have the same number in the resulting patterns. */
3081 for_each_inc_dec (PATTERN (i0
), count_auto_inc
, &n_auto_inc
);
3083 for_each_inc_dec (PATTERN (i1
), count_auto_inc
, &n_auto_inc
);
3084 for_each_inc_dec (PATTERN (i2
), count_auto_inc
, &n_auto_inc
);
3085 for_each_inc_dec (PATTERN (i3
), count_auto_inc
, &n_auto_inc
);
3087 /* If the set in I2 needs to be kept around, we must make a copy of
3088 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3089 PATTERN (I2), we are only substituting for the original I1DEST, not into
3090 an already-substituted copy. This also prevents making self-referential
3091 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3096 if (GET_CODE (PATTERN (i2
)) == PARALLEL
)
3097 i2pat
= gen_rtx_SET (i2dest
, copy_rtx (i2src
));
3099 i2pat
= copy_rtx (PATTERN (i2
));
3104 if (GET_CODE (PATTERN (i1
)) == PARALLEL
)
3105 i1pat
= gen_rtx_SET (i1dest
, copy_rtx (i1src
));
3107 i1pat
= copy_rtx (PATTERN (i1
));
3112 if (GET_CODE (PATTERN (i0
)) == PARALLEL
)
3113 i0pat
= gen_rtx_SET (i0dest
, copy_rtx (i0src
));
3115 i0pat
= copy_rtx (PATTERN (i0
));
3120 /* Substitute in the latest insn for the regs set by the earlier ones. */
3122 maxreg
= max_reg_num ();
3126 /* Many machines have insns that can both perform an
3127 arithmetic operation and set the condition code. These operations will
3128 be represented as a PARALLEL with the first element of the vector
3129 being a COMPARE of an arithmetic operation with the constant zero.
3130 The second element of the vector will set some pseudo to the result
3131 of the same arithmetic operation. If we simplify the COMPARE, we won't
3132 match such a pattern and so will generate an extra insn. Here we test
3133 for this case, where both the comparison and the operation result are
3134 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3135 I2SRC. Later we will make the PARALLEL that contains I2. */
3137 if (i1
== 0 && added_sets_2
&& GET_CODE (PATTERN (i3
)) == SET
3138 && GET_CODE (SET_SRC (PATTERN (i3
))) == COMPARE
3139 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3
)), 1))
3140 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3
)), 0), i2dest
))
3143 rtx
*cc_use_loc
= NULL
;
3144 rtx_insn
*cc_use_insn
= NULL
;
3145 rtx op0
= i2src
, op1
= XEXP (SET_SRC (PATTERN (i3
)), 1);
3146 machine_mode compare_mode
, orig_compare_mode
;
3147 enum rtx_code compare_code
= UNKNOWN
, orig_compare_code
= UNKNOWN
;
3148 scalar_int_mode mode
;
3150 newpat
= PATTERN (i3
);
3151 newpat_dest
= SET_DEST (newpat
);
3152 compare_mode
= orig_compare_mode
= GET_MODE (newpat_dest
);
3154 if (undobuf
.other_insn
== 0
3155 && (cc_use_loc
= find_single_use (SET_DEST (newpat
), i3
,
3158 compare_code
= orig_compare_code
= GET_CODE (*cc_use_loc
);
3159 if (is_a
<scalar_int_mode
> (GET_MODE (i2dest
), &mode
))
3160 compare_code
= simplify_compare_const (compare_code
, mode
,
3162 target_canonicalize_comparison (&compare_code
, &op0
, &op1
, 1);
3165 /* Do the rest only if op1 is const0_rtx, which may be the
3166 result of simplification. */
3167 if (op1
== const0_rtx
)
3169 /* If a single use of the CC is found, prepare to modify it
3170 when SELECT_CC_MODE returns a new CC-class mode, or when
3171 the above simplify_compare_const() returned a new comparison
3172 operator. undobuf.other_insn is assigned the CC use insn
3173 when modifying it. */
3176 #ifdef SELECT_CC_MODE
3177 machine_mode new_mode
3178 = SELECT_CC_MODE (compare_code
, op0
, op1
);
3179 if (new_mode
!= orig_compare_mode
3180 && can_change_dest_mode (SET_DEST (newpat
),
3181 added_sets_2
, new_mode
))
3183 unsigned int regno
= REGNO (newpat_dest
);
3184 compare_mode
= new_mode
;
3185 if (regno
< FIRST_PSEUDO_REGISTER
)
3186 newpat_dest
= gen_rtx_REG (compare_mode
, regno
);
3189 SUBST_MODE (regno_reg_rtx
[regno
], compare_mode
);
3190 newpat_dest
= regno_reg_rtx
[regno
];
3194 /* Cases for modifying the CC-using comparison. */
3195 if (compare_code
!= orig_compare_code
3196 /* ??? Do we need to verify the zero rtx? */
3197 && XEXP (*cc_use_loc
, 1) == const0_rtx
)
3199 /* Replace cc_use_loc with entire new RTX. */
3201 gen_rtx_fmt_ee (compare_code
, GET_MODE (*cc_use_loc
),
3202 newpat_dest
, const0_rtx
));
3203 undobuf
.other_insn
= cc_use_insn
;
3205 else if (compare_mode
!= orig_compare_mode
)
3207 /* Just replace the CC reg with a new mode. */
3208 SUBST (XEXP (*cc_use_loc
, 0), newpat_dest
);
3209 undobuf
.other_insn
= cc_use_insn
;
3213 /* Now we modify the current newpat:
3214 First, SET_DEST(newpat) is updated if the CC mode has been
3215 altered. For targets without SELECT_CC_MODE, this should be
3217 if (compare_mode
!= orig_compare_mode
)
3218 SUBST (SET_DEST (newpat
), newpat_dest
);
3219 /* This is always done to propagate i2src into newpat. */
3220 SUBST (SET_SRC (newpat
),
3221 gen_rtx_COMPARE (compare_mode
, op0
, op1
));
3222 /* Create new version of i2pat if needed; the below PARALLEL
3223 creation needs this to work correctly. */
3224 if (! rtx_equal_p (i2src
, op0
))
3225 i2pat
= gen_rtx_SET (i2dest
, op0
);
3230 if (i2_is_used
== 0)
3232 /* It is possible that the source of I2 or I1 may be performing
3233 an unneeded operation, such as a ZERO_EXTEND of something
3234 that is known to have the high part zero. Handle that case
3235 by letting subst look at the inner insns.
3237 Another way to do this would be to have a function that tries
3238 to simplify a single insn instead of merging two or more
3239 insns. We don't do this because of the potential of infinite
3240 loops and because of the potential extra memory required.
3241 However, doing it the way we are is a bit of a kludge and
3242 doesn't catch all cases.
3244 But only do this if -fexpensive-optimizations since it slows
3245 things down and doesn't usually win.
3247 This is not done in the COMPARE case above because the
3248 unmodified I2PAT is used in the PARALLEL and so a pattern
3249 with a modified I2SRC would not match. */
3251 if (flag_expensive_optimizations
)
3253 /* Pass pc_rtx so no substitutions are done, just
3257 subst_low_luid
= DF_INSN_LUID (i1
);
3258 i1src
= subst (i1src
, pc_rtx
, pc_rtx
, 0, 0, 0);
3261 subst_low_luid
= DF_INSN_LUID (i2
);
3262 i2src
= subst (i2src
, pc_rtx
, pc_rtx
, 0, 0, 0);
3265 n_occurrences
= 0; /* `subst' counts here */
3266 subst_low_luid
= DF_INSN_LUID (i2
);
3268 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3269 copy of I2SRC each time we substitute it, in order to avoid creating
3270 self-referential RTL when we will be substituting I1SRC for I1DEST
3271 later. Likewise if I0 feeds into I2, either directly or indirectly
3272 through I1, and I0DEST is in I0SRC. */
3273 newpat
= subst (PATTERN (i3
), i2dest
, i2src
, 0, 0,
3274 (i1_feeds_i2_n
&& i1dest_in_i1src
)
3275 || ((i0_feeds_i2_n
|| (i0_feeds_i1_n
&& i1_feeds_i2_n
))
3276 && i0dest_in_i0src
));
3279 /* Record whether I2's body now appears within I3's body. */
3280 i2_is_used
= n_occurrences
;
3283 /* If we already got a failure, don't try to do more. Otherwise, try to
3284 substitute I1 if we have it. */
3286 if (i1
&& GET_CODE (newpat
) != CLOBBER
)
3288 /* Before we can do this substitution, we must redo the test done
3289 above (see detailed comments there) that ensures I1DEST isn't
3290 mentioned in any SETs in NEWPAT that are field assignments. */
3291 if (!combinable_i3pat (NULL
, &newpat
, i1dest
, NULL_RTX
, NULL_RTX
,
3299 subst_low_luid
= DF_INSN_LUID (i1
);
3301 /* If the following substitution will modify I1SRC, make a copy of it
3302 for the case where it is substituted for I1DEST in I2PAT later. */
3303 if (added_sets_2
&& i1_feeds_i2_n
)
3304 i1src_copy
= copy_rtx (i1src
);
3306 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3307 copy of I1SRC each time we substitute it, in order to avoid creating
3308 self-referential RTL when we will be substituting I0SRC for I0DEST
3310 newpat
= subst (newpat
, i1dest
, i1src
, 0, 0,
3311 i0_feeds_i1_n
&& i0dest_in_i0src
);
3314 /* Record whether I1's body now appears within I3's body. */
3315 i1_is_used
= n_occurrences
;
3318 /* Likewise for I0 if we have it. */
3320 if (i0
&& GET_CODE (newpat
) != CLOBBER
)
3322 if (!combinable_i3pat (NULL
, &newpat
, i0dest
, NULL_RTX
, NULL_RTX
,
3329 /* If the following substitution will modify I0SRC, make a copy of it
3330 for the case where it is substituted for I0DEST in I1PAT later. */
3331 if (added_sets_1
&& i0_feeds_i1_n
)
3332 i0src_copy
= copy_rtx (i0src
);
3333 /* And a copy for I0DEST in I2PAT substitution. */
3334 if (added_sets_2
&& ((i0_feeds_i1_n
&& i1_feeds_i2_n
)
3335 || (i0_feeds_i2_n
)))
3336 i0src_copy2
= copy_rtx (i0src
);
3339 subst_low_luid
= DF_INSN_LUID (i0
);
3340 newpat
= subst (newpat
, i0dest
, i0src
, 0, 0, 0);
3346 int new_n_auto_inc
= 0;
3347 for_each_inc_dec (newpat
, count_auto_inc
, &new_n_auto_inc
);
3349 if (n_auto_inc
!= new_n_auto_inc
)
3351 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3352 fprintf (dump_file
, "Number of auto_inc expressions changed\n");
3358 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3359 to count all the ways that I2SRC and I1SRC can be used. */
3360 if ((FIND_REG_INC_NOTE (i2
, NULL_RTX
) != 0
3361 && i2_is_used
+ added_sets_2
> 1)
3362 || (i1
!= 0 && FIND_REG_INC_NOTE (i1
, NULL_RTX
) != 0
3363 && (i1_is_used
+ added_sets_1
+ (added_sets_2
&& i1_feeds_i2_n
)
3365 || (i0
!= 0 && FIND_REG_INC_NOTE (i0
, NULL_RTX
) != 0
3366 && (n_occurrences
+ added_sets_0
3367 + (added_sets_1
&& i0_feeds_i1_n
)
3368 + (added_sets_2
&& i0_feeds_i2_n
)
3370 /* Fail if we tried to make a new register. */
3371 || max_reg_num () != maxreg
3372 /* Fail if we couldn't do something and have a CLOBBER. */
3373 || GET_CODE (newpat
) == CLOBBER
3374 /* Fail if this new pattern is a MULT and we didn't have one before
3375 at the outer level. */
3376 || (GET_CODE (newpat
) == SET
&& GET_CODE (SET_SRC (newpat
)) == MULT
3383 /* If the actions of the earlier insns must be kept
3384 in addition to substituting them into the latest one,
3385 we must make a new PARALLEL for the latest insn
3386 to hold additional the SETs. */
3388 if (added_sets_0
|| added_sets_1
|| added_sets_2
)
3390 int extra_sets
= added_sets_0
+ added_sets_1
+ added_sets_2
;
3393 if (GET_CODE (newpat
) == PARALLEL
)
3395 rtvec old
= XVEC (newpat
, 0);
3396 total_sets
= XVECLEN (newpat
, 0) + extra_sets
;
3397 newpat
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (total_sets
));
3398 memcpy (XVEC (newpat
, 0)->elem
, &old
->elem
[0],
3399 sizeof (old
->elem
[0]) * old
->num_elem
);
3404 total_sets
= 1 + extra_sets
;
3405 newpat
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (total_sets
));
3406 XVECEXP (newpat
, 0, 0) = old
;
3410 XVECEXP (newpat
, 0, --total_sets
) = i0pat
;
3416 t
= subst (t
, i0dest
, i0src_copy
? i0src_copy
: i0src
, 0, 0, 0);
3418 XVECEXP (newpat
, 0, --total_sets
) = t
;
3424 t
= subst (t
, i1dest
, i1src_copy
? i1src_copy
: i1src
, 0, 0,
3425 i0_feeds_i1_n
&& i0dest_in_i0src
);
3426 if ((i0_feeds_i1_n
&& i1_feeds_i2_n
) || i0_feeds_i2_n
)
3427 t
= subst (t
, i0dest
, i0src_copy2
? i0src_copy2
: i0src
, 0, 0, 0);
3429 XVECEXP (newpat
, 0, --total_sets
) = t
;
3433 validate_replacement
:
3435 /* Note which hard regs this insn has as inputs. */
3436 mark_used_regs_combine (newpat
);
3438 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3439 consider splitting this pattern, we might need these clobbers. */
3440 if (i1
&& GET_CODE (newpat
) == PARALLEL
3441 && GET_CODE (XVECEXP (newpat
, 0, XVECLEN (newpat
, 0) - 1)) == CLOBBER
)
3443 int len
= XVECLEN (newpat
, 0);
3445 newpat_vec_with_clobbers
= rtvec_alloc (len
);
3446 for (i
= 0; i
< len
; i
++)
3447 RTVEC_ELT (newpat_vec_with_clobbers
, i
) = XVECEXP (newpat
, 0, i
);
3450 /* We have recognized nothing yet. */
3451 insn_code_number
= -1;
3453 /* See if this is a PARALLEL of two SETs where one SET's destination is
3454 a register that is unused and this isn't marked as an instruction that
3455 might trap in an EH region. In that case, we just need the other SET.
3456 We prefer this over the PARALLEL.
3458 This can occur when simplifying a divmod insn. We *must* test for this
3459 case here because the code below that splits two independent SETs doesn't
3460 handle this case correctly when it updates the register status.
3462 It's pointless doing this if we originally had two sets, one from
3463 i3, and one from i2. Combining then splitting the parallel results
3464 in the original i2 again plus an invalid insn (which we delete).
3465 The net effect is only to move instructions around, which makes
3466 debug info less accurate.
3468 If the remaining SET came from I2 its destination should not be used
3469 between I2 and I3. See PR82024. */
3471 if (!(added_sets_2
&& i1
== 0)
3472 && is_parallel_of_n_reg_sets (newpat
, 2)
3473 && asm_noperands (newpat
) < 0)
3475 rtx set0
= XVECEXP (newpat
, 0, 0);
3476 rtx set1
= XVECEXP (newpat
, 0, 1);
3477 rtx oldpat
= newpat
;
3479 if (((REG_P (SET_DEST (set1
))
3480 && find_reg_note (i3
, REG_UNUSED
, SET_DEST (set1
)))
3481 || (GET_CODE (SET_DEST (set1
)) == SUBREG
3482 && find_reg_note (i3
, REG_UNUSED
, SUBREG_REG (SET_DEST (set1
)))))
3483 && insn_nothrow_p (i3
)
3484 && !side_effects_p (SET_SRC (set1
)))
3487 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3490 else if (((REG_P (SET_DEST (set0
))
3491 && find_reg_note (i3
, REG_UNUSED
, SET_DEST (set0
)))
3492 || (GET_CODE (SET_DEST (set0
)) == SUBREG
3493 && find_reg_note (i3
, REG_UNUSED
,
3494 SUBREG_REG (SET_DEST (set0
)))))
3495 && insn_nothrow_p (i3
)
3496 && !side_effects_p (SET_SRC (set0
)))
3498 rtx dest
= SET_DEST (set1
);
3499 if (GET_CODE (dest
) == SUBREG
)
3500 dest
= SUBREG_REG (dest
);
3501 if (!reg_used_between_p (dest
, i2
, i3
))
3504 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3506 if (insn_code_number
>= 0)
3507 changed_i3_dest
= 1;
3511 if (insn_code_number
< 0)
3515 /* Is the result of combination a valid instruction? */
3516 if (insn_code_number
< 0)
3517 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3519 /* If we were combining three insns and the result is a simple SET
3520 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3521 insns. There are two ways to do this. It can be split using a
3522 machine-specific method (like when you have an addition of a large
3523 constant) or by combine in the function find_split_point. */
3525 if (i1
&& insn_code_number
< 0 && GET_CODE (newpat
) == SET
3526 && asm_noperands (newpat
) < 0)
3528 rtx parallel
, *split
;
3529 rtx_insn
*m_split_insn
;
3531 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3532 use I2DEST as a scratch register will help. In the latter case,
3533 convert I2DEST to the mode of the source of NEWPAT if we can. */
3535 m_split_insn
= combine_split_insns (newpat
, i3
);
3537 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3538 inputs of NEWPAT. */
3540 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3541 possible to try that as a scratch reg. This would require adding
3542 more code to make it work though. */
3544 if (m_split_insn
== 0 && ! reg_overlap_mentioned_p (i2dest
, newpat
))
3546 machine_mode new_mode
= GET_MODE (SET_DEST (newpat
));
3548 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3549 (temporarily, until we are committed to this instruction
3550 combination) does not work: for example, any call to nonzero_bits
3551 on the register (from a splitter in the MD file, for example)
3552 will get the old information, which is invalid.
3554 Since nowadays we can create registers during combine just fine,
3555 we should just create a new one here, not reuse i2dest. */
3557 /* First try to split using the original register as a
3558 scratch register. */
3559 parallel
= gen_rtx_PARALLEL (VOIDmode
,
3560 gen_rtvec (2, newpat
,
3561 gen_rtx_CLOBBER (VOIDmode
,
3563 m_split_insn
= combine_split_insns (parallel
, i3
);
3565 /* If that didn't work, try changing the mode of I2DEST if
3567 if (m_split_insn
== 0
3568 && new_mode
!= GET_MODE (i2dest
)
3569 && new_mode
!= VOIDmode
3570 && can_change_dest_mode (i2dest
, added_sets_2
, new_mode
))
3572 machine_mode old_mode
= GET_MODE (i2dest
);
3575 if (REGNO (i2dest
) < FIRST_PSEUDO_REGISTER
)
3576 ni2dest
= gen_rtx_REG (new_mode
, REGNO (i2dest
));
3579 SUBST_MODE (regno_reg_rtx
[REGNO (i2dest
)], new_mode
);
3580 ni2dest
= regno_reg_rtx
[REGNO (i2dest
)];
3583 parallel
= (gen_rtx_PARALLEL
3585 gen_rtvec (2, newpat
,
3586 gen_rtx_CLOBBER (VOIDmode
,
3588 m_split_insn
= combine_split_insns (parallel
, i3
);
3590 if (m_split_insn
== 0
3591 && REGNO (i2dest
) >= FIRST_PSEUDO_REGISTER
)
3595 adjust_reg_mode (regno_reg_rtx
[REGNO (i2dest
)], old_mode
);
3596 buf
= undobuf
.undos
;
3597 undobuf
.undos
= buf
->next
;
3598 buf
->next
= undobuf
.frees
;
3599 undobuf
.frees
= buf
;
3603 i2scratch
= m_split_insn
!= 0;
3606 /* If recog_for_combine has discarded clobbers, try to use them
	     again for the split.  */
	  if (m_split_insn == 0 && newpat_vec_with_clobbers)
	    {
	      parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	      m_split_insn = combine_split_insns (parallel, i3);
	    }

	  if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	    {
	      rtx m_split_pat = PATTERN (m_split_insn);
	      insn_code_number = recog_for_combine (&m_split_pat, i3,
						    &new_i3_notes);
	      if (insn_code_number >= 0)
		newpat = m_split_pat;
	    }
	  else if (m_split_insn
		   && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
		   && (next_nonnote_nondebug_insn (i2) == i3
		       || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
	    {
	      rtx i2set, i3set;
	      rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
	      newi2pat = PATTERN (m_split_insn);

	      i3set = single_set (NEXT_INSN (m_split_insn));
	      i2set = single_set (m_split_insn);

	      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	      /* If I2 or I3 has multiple SETs, we won't know how to track
		 register status, so don't use these insns.  If I2's destination
		 is used between I2 and I3, we also can't use these insns.  */

	      if (i2_code_number >= 0 && i2set && i3set
		  && (next_nonnote_nondebug_insn (i2) == i3
		      || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
		insn_code_number = recog_for_combine (&newi3pat, i3,
						      &new_i3_notes);
	      if (insn_code_number >= 0)
		newpat = newi3pat;

	      /* It is possible that both insns now set the destination of I3.
		 If so, we must show an extra use of it.  */

	      if (insn_code_number >= 0)
		{
		  rtx new_i3_dest = SET_DEST (i3set);
		  rtx new_i2_dest = SET_DEST (i2set);

		  while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
			 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
			 || GET_CODE (new_i3_dest) == SUBREG)
		    new_i3_dest = XEXP (new_i3_dest, 0);

		  while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
			 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
			 || GET_CODE (new_i2_dest) == SUBREG)
		    new_i2_dest = XEXP (new_i2_dest, 0);

		  if (REG_P (new_i3_dest)
		      && REG_P (new_i2_dest)
		      && REGNO (new_i3_dest) == REGNO (new_i2_dest)
		      && REGNO (new_i2_dest) < reg_n_sets_max)
		    INC_REG_N_SETS (REGNO (new_i2_dest), 1);
		}
	    }
	}

      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || !modified_between_p (*split, i2, i3))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  i2scratch = true;

	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_BIND_INSNS)
	    i2src = copy_rtx (i2src);
	  else
	    i2src = NULL;

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }

	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      rtx i_rtx = gen_int_shift_amount (split_mode, i);
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), i_rtx));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }
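	  /* For example, (mult FOO (const_int 8)) is rewritten above as
	     (ashift FOO (const_int 3)), since 8 is 1 << 3.  */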
	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
	  if (split_code == PLUS
	      && GET_CODE (XEXP (*split, 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
	    {
	      rtx nsplit = XEXP (*split, 0);
	      rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
						       XEXP (nsplit, 0),
						       i_rtx));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif
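	  /* For example, (subreg:SI (mem:QI ADDR) 0) is rewritten above as
	     (zero_extend:SI (mem:QI ADDR)) -- or as a sign_extend when
	     load_extend_op says the target's narrow loads sign-extend --
	     making the implicit extension explicit for the splitter.  */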
	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
		      || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }
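	  /* For instance, "x = ((a + b) + a) + b", i.e.
	     (plus (plus (plus a b) a) b), becomes "t = a + b; x = t + t",
	     exposing the shared subexpression as the new I2.  */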
	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}

      /* Check for a case where we loaded from memory in a narrow mode and
	 then sign extended it, but we need both registers.  In that case,
	 we have a PARALLEL with both loads from the same memory location.
	 We can split this into a load from memory followed by a register-register
	 copy.  This saves at least one insn, more if register allocation can
	 eliminate the copy.

	 We cannot do this if the destination of the first assignment is a
	 condition code register.  We eliminate this case by making sure
	 the SET_DEST and SET_SRC have the same mode.

	 We cannot do this if the destination of the second assignment is
	 a register that we have already assumed is zero-extended.  Similarly
	 for a SUBREG of such a register.  */

      else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	       && GET_CODE (newpat) == PARALLEL
	       && XVECLEN (newpat, 0) == 2
	       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	       && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	       && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
		   == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	       && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			       XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	       && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	       && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				   BITS_PER_WORD)
		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				   HOST_BITS_PER_INT)
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode))))
	       && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		     && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
			 (REG_P (temp_expr)
			  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
			  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				       BITS_PER_WORD)
			  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				       HOST_BITS_PER_INT)
			  && (reg_stat[REGNO (temp_expr)].nonzero_bits
			      != GET_MODE_MASK (word_mode)))))
	       && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					     SET_SRC (XVECEXP (newpat, 0, 1)))
	       && ! find_reg_note (i3, REG_UNUSED,
				   SET_DEST (XVECEXP (newpat, 0, 0))))
	{
	  rtx ni2dest;

	  newi2pat = XVECEXP (newpat, 0, 0);
	  ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
	  newpat = XVECEXP (newpat, 0, 1);
	  SUBST (SET_SRC (newpat),
		 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  if (i2_code_number >= 0)
	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    swap_i2i3 = 1;
	}
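      /* For example, (parallel [(set A (sign_extend:SI (mem:HI M)))
				 (set B (mem:HI M))])
	 becomes a sign-extending load of M into A (the new I2) followed
	 by a copy of the low part of A into B (the new I3).  */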
      /* Similarly, check for a case where we have a PARALLEL of two independent
	 SETs but we started with three insns.  In this case, we can do the sets
	 as two separate insns.  This case occurs when some SET allows two
	 other insns to combine, but the destination of that SET is still live.

	 Also do this if we started with two insns and (at least) one of the
	 resulting sets is a noop; this noop will be deleted later.

	 Also do this if we started with two insns neither of which was a simple
	 move.  */

      else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	       && GET_CODE (newpat) == PARALLEL
	       && XVECLEN (newpat, 0) == 2
	       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	       && (i1
		   || set_noop_p (XVECEXP (newpat, 0, 0))
		   || set_noop_p (XVECEXP (newpat, 0, 1))
		   || (!i2_was_move && !i3_was_move))
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	       && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				      XVECEXP (newpat, 0, 0))
	       && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				      XVECEXP (newpat, 0, 1))
	       && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		     && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
	{
	  rtx set0 = XVECEXP (newpat, 0, 0);
	  rtx set1 = XVECEXP (newpat, 0, 1);

	  /* Normally, it doesn't matter which of the two is done first, but
	     one which uses any regs/memory set in between i2 and i3 can't
	     be first.  The PARALLEL might also have been pre-existing in i3,
	     so we need to make sure that we won't wrongly hoist a SET to i2
	     that would conflict with a death note present in there, or would
	     have its dest modified between i2 and i3.  */
	  if (!modified_between_p (SET_SRC (set1), i2, i3)
	      && !(REG_P (SET_DEST (set1))
		   && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	      && !(GET_CODE (SET_DEST (set1)) == SUBREG
		   && find_reg_note (i2, REG_DEAD,
				     SUBREG_REG (SET_DEST (set1))))
	      && !modified_between_p (SET_DEST (set1), i2, i3)
	      /* If I3 is a jump, ensure that set0 is a jump so that
		 we do not create invalid RTL.  */
	      && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	    {
	      newi2pat = set1;
	      newpat = set0;
	    }
	  else if (!modified_between_p (SET_SRC (set0), i2, i3)
		   && !(REG_P (SET_DEST (set0))
			&& find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
		   && !(GET_CODE (SET_DEST (set0)) == SUBREG
			&& find_reg_note (i2, REG_DEAD,
					  SUBREG_REG (SET_DEST (set0))))
		   && !modified_between_p (SET_DEST (set0), i2, i3)
		   /* If I3 is a jump, ensure that set1 is a jump so that
		      we do not create invalid RTL.  */
		   && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	    {
	      newi2pat = set0;
	      newpat = set1;
	    }
	  else
	    {
	      undo_all ();
	      return 0;
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  if (i2_code_number >= 0)
	    {
	      /* recog_for_combine might have added CLOBBERs to newi2pat.
		 Make sure NEWPAT does not depend on the clobbered regs.  */
	      if (GET_CODE (newi2pat) == PARALLEL)
		{
		  for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		    if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		      {
			rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
			if (reg_overlap_mentioned_p (reg, newpat))
			  {
			    undo_all ();
			    return 0;
			  }
		      }
		}

	      insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	      /* Likewise, recog_for_combine might have added clobbers to NEWPAT.
		 Checking that the SET0's SET_DEST and SET1's SET_DEST aren't
		 mentioned/clobbered, ensures NEWI2PAT's SET_DEST is live.  */
	      if (insn_code_number >= 0 && GET_CODE (newpat) == PARALLEL)
		{
		  for (i = XVECLEN (newpat, 0) - 1; i >= 0; i--)
		    if (GET_CODE (XVECEXP (newpat, 0, i)) == CLOBBER)
		      {
			rtx reg = XEXP (XVECEXP (newpat, 0, i), 0);
			if (reg_overlap_mentioned_p (reg, SET_DEST (set0))
			    || reg_overlap_mentioned_p (reg, SET_DEST (set1)))
			  {
			    undo_all ();
			    return 0;
			  }
		      }
		}

	      if (insn_code_number >= 0)
		split_i2i3 = 1;
	    }
	}
      /* If it still isn't recognized, fail and change things back the way they
	 were.  */
      if ((insn_code_number < 0
	   /* Is the result a reasonable ASM_OPERANDS?  */
	   && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
	{
	  undo_all ();
	  return 0;
	}

      /* If we had to change another insn, make sure it is valid also.  */
      if (undobuf.other_insn)
	{
	  CLEAR_HARD_REG_SET (newpat_used_regs);

	  other_pat = PATTERN (undobuf.other_insn);
	  other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
						 &new_other_notes);

	  if (other_code_number < 0 && ! check_asm_operands (other_pat))
	    {
	      undo_all ();
	      return 0;
	    }
	}

      /* Only allow this combination if insn_cost reports that the
	 replacement instructions are cheaper than the originals.  */
      if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  if (MAY_HAVE_DEBUG_BIND_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    gcc_assert (last);
		    if (DF_INSN_LUID (last)
			< DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }
  /* If we will be able to accept this, we have made a
     change to the destination of I3.  This requires us to
     do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }

  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn)))
	      /* Simply drop equal note since it may be no longer valid
		 for other_insn.  It may be possible to record that CC
		 register is changed and only discard those notes, but
		 in practice it's unnecessary complication and doesn't
		 give any meaningful improvement.  */
	      || REG_NOTE_KIND (note) == REG_EQUAL
	      || REG_NOTE_KIND (note) == REG_EQUIV)
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }

  if (swap_i2i3)
    {
      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }

  if (swap_i2i3 || split_i2i3)
    {
      /* We might need a LOG_LINK from I3 to I2.  But then we used to
	 have one, so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We should change it to point at
	 I2 instead.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      rtx x = newi2pat;
      if (GET_CODE (x) == PARALLEL)
	x = XVECEXP (newi2pat, 0, 0);

      if (REG_P (SET_DEST (x))
	  || (GET_CODE (SET_DEST (x)) == SUBREG
	      && REG_P (SUBREG_REG (SET_DEST (x)))))
	{
	  unsigned int regno = reg_or_subregno (SET_DEST (x));

	  bool done = false;
	  for (rtx_insn *insn = NEXT_INSN (i3);
	       !done
	       && insn
	       && NONDEBUG_INSN_P (insn)
	       && BLOCK_FOR_INSN (insn) == this_basic_block;
	       insn = NEXT_INSN (insn))
	    {
	      struct insn_link *link;
	      FOR_EACH_LOG_LINK (link, insn)
		if (link->insn == i3 && link->regno == regno)
		  {
		    link->insn = i2;
		    done = true;
		    break;
		  }
	    }
	}
    }

  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;

    /* Compute which registers we expect to eliminate.  newi2pat may be setting
       either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing REG_DEAD
       note for i2 and i3, in which case it does matter if newi2pat sets
       i1dest or not.

       Local information is used when distributing REG_DEAD note for i1,
       in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151, if we have four insns combination:
	   i0: r0 <- i0src
	   i1: r1 <- i1src (using r0)
		     REG_DEAD (r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)
		     REG_DEAD (r0)
       From i1's point of view, r0 is eliminated, no matter if it is set
       by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
       should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);

    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything is shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;

    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
	     link = XEXP (link, 1))
	  {
	    if (substed_i2)
	      {
		/* I2SRC must still be meaningful at this point.  Some
		   splitting operations can invalidate I2SRC, but those
		   operations do not apply to calls.  */
		gcc_assert (i2src);
		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						       i2dest, i2src);
	      }
	    if (substed_i1)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i1dest, i1src);
	    if (substed_i0)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i0dest, i0src);
	  }
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;

    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = 0;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;

    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }

    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_BIND_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_BIND_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }
    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);

    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);

    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
       in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }
    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);

    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, thing
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }

    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }

    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }

    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }

  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }
  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }

  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  if (GET_CODE (PATTERN (i3)) == TRAP_IF
      && XEXP (PATTERN (i3), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (i3);
      gcc_assert (bb);
      remove_edge (split_block (bb, i3));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  if (undobuf.other_insn
      && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
      && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
      gcc_assert (bb);
      remove_edge (split_block (bb, undobuf.other_insn));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  combine_successes++;
  undo_commit ();

  rtx_insn *ret = newi2pat ? i2 : i3;
  if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
    ret = added_links_insn;
  if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
    ret = added_notes_insn;

  return ret;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}

/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;
  scalar_int_mode mode, inner_mode;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
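      /* For example, (mem (symbol_ref "s")) becomes
	 (mem (lo_sum (high (symbol_ref "s")) (symbol_ref "s"))) above,
	 matching targets that form a full address in two instructions;
	 the HIGH part is returned as the split point.  */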
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work and we have a nested plus, like:
	     ((REG1 * CONST1) + REG2) + CONST2 and (REG1 + REG2) + CONST2
	     is valid address, try to split (REG1 * CONST1).  */
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
	      && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
	      && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	      && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
							 0), 0)))))
	    {
	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
	      XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					       MEM_ADDR_SPACE (x)))
		{
		  XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
		  return &XEXP (XEXP (XEXP (x, 0), 0), 0);
		}
	      XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
	    }
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
		   && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
		   && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
							      0), 1)))))
	    {
	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
	      XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					       MEM_ADDR_SPACE (x)))
		{
		  XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
		  return &XEXP (XEXP (XEXP (x, 0), 0), 1);
		}
	      XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */
	  if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
	      && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;
    case SET:
      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
				     &inner_mode)
	  && HWI_COMPUTABLE_MODE_P (inner_mode)
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (inner_mode))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1;
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, inner_mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, inner_mode,
					  simplify_gen_binary (AND, inner_mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
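      /* For example, storing the constant 5 into an 8-bit field at bit 4
	 of R is rewritten above as R = (R & ~(0xff << 4)) | (5 << 4);
	 when the constant fills the whole field, the AND is not needed.  */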
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
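	  /* For example, "t = x & 0x100000; if (t != 0) ..." only tests
	     bit 20 of X, so T can be computed as a one-bit ZERO_EXTRACT of
	     X at position 20 rather than by first materializing the large
	     mask.  */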
	  break;

	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
						   GET_MODE (XEXP (SET_SRC (x),
								   0))))) >= 1))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    pos_rtx)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
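	  /* With STORE_FLAG_VALUE == -1 and only bit POS of X possibly
	     nonzero, (ne X 0) is -1 exactly when bit POS is set, which
	     is (neg (lshiftrt X POS)).  */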
	  break;

	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (!is_int_mode (GET_MODE (inner), &inner_mode)
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (inner_mode);
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
				      &inner_mode)
	      && CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (inner_mode) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len
	  && known_subrange_p (pos, len,
			       0, GET_MODE_PRECISION (GET_MODE (inner)))
	  && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
	{
	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= (HOST_WIDE_INT_1U << len) - 1;
	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner), pos_rtx),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      int left_bits = GET_MODE_PRECISION (mode) - len - pos;
	      int right_bits = GET_MODE_PRECISION (mode) - len;
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      gen_int_shift_amount (mode, left_bits)),
		      gen_int_shift_amount (mode, right_bits)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
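	      /* For example, a signed 5-bit field at bit 3 of a 32-bit
		 value becomes (ashiftrt (ashift X 24) 27): shifting left
		 by 32 - 5 - 3 moves the field to the top, and the
		 arithmetic shift right by 32 - 5 sign-extends it back.  */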
	    }
	}

      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;
    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}
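      /* This is just De Morgan's law: (and (not A) (not B)) is
	 (not (ior A B)), and (ior (not A) (not B)) is (not (and A B)).  */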
      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;

    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}
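      /* For instance, "a - b*5" is retried above as "b*-5 + a", since a
	 multiply-add pattern may match where a multiply-subtract would
	 not.  */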
      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}

/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

  /* Two expressions are equal if they are identical copies of a shared
     RTX or if they are both registers with the same register number
     and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;
  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest) && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register or PC, so
	 set up to skip this common case.  All other cases where we want
	 to suppress replacing something inside a SET_SRC are handled via
	 the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (GET_CODE (x) == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow an exception to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable.  */

		  if (GET_CODE (to) == SUBREG
		      && !targetm.modes_tieable_p (GET_MODE (to),
						   GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && (targetm.modes_tieable_p
				(GET_MODE (x), GET_MODE (SUBREG_REG (to))))))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		  n_occurrences++;
		}
5490 have gone inside a MEM, in which case we want to
5491 simplify the address. We assume here that things that
5492 are actually part of the destination have their inner
5493 parts in the first expression. This is true for SUBREG,
5494 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5495 things aside from REG and MEM that should appear in a
5497 new_rtx
= subst (XEXP (x
, i
), from
, to
,
5499 && (code
== SUBREG
|| code
== STRICT_LOW_PART
5500 || code
== ZERO_EXTRACT
))
5503 code
== IF_THEN_ELSE
&& i
== 0,
5506 /* If we found that we will have to reject this combination,
5507 indicate that by returning the CLOBBER ourselves, rather than
5508 an expression containing it. This will speed things up as
5509 well as prevent accidents where two CLOBBERs are considered
5510 to be equal, thus producing an incorrect simplification. */
5512 if (GET_CODE (new_rtx
) == CLOBBER
&& XEXP (new_rtx
, 0) == const0_rtx
)
5515 if (GET_CODE (x
) == SUBREG
&& CONST_SCALAR_INT_P (new_rtx
))
5517 machine_mode mode
= GET_MODE (x
);
5519 x
= simplify_subreg (GET_MODE (x
), new_rtx
,
5520 GET_MODE (SUBREG_REG (x
)),
5523 x
= gen_rtx_CLOBBER (mode
, const0_rtx
);
5525 else if (CONST_SCALAR_INT_P (new_rtx
)
5526 && (GET_CODE (x
) == ZERO_EXTEND
5527 || GET_CODE (x
) == SIGN_EXTEND
5528 || GET_CODE (x
) == FLOAT
5529 || GET_CODE (x
) == UNSIGNED_FLOAT
))
5531 x
= simplify_unary_operation (GET_CODE (x
), GET_MODE (x
),
5533 GET_MODE (XEXP (x
, 0)));
5535 return gen_rtx_CLOBBER (VOIDmode
, const0_rtx
);
5538 SUBST (XEXP (x
, i
), new_rtx
);
5543 /* Check if we are loading something from the constant pool via float
5544 extension; in this case we would undo compress_float_constant
5545 optimization and degenerate constant load to an immediate value. */
5546 if (GET_CODE (x
) == FLOAT_EXTEND
5547 && MEM_P (XEXP (x
, 0))
5548 && MEM_READONLY_P (XEXP (x
, 0)))
5550 rtx tmp
= avoid_constant_pool_reference (x
);
5555 /* Try to simplify X. If the simplification changed the code, it is likely
5556 that further simplification will help, so loop, but limit the number
5557 of repetitions that will be performed. */
5559 for (i
= 0; i
< 4; i
++)
5561 /* If X is sufficiently simple, don't bother trying to do anything
5563 if (code
!= CONST_INT
&& code
!= REG
&& code
!= CLOBBER
)
5564 x
= combine_simplify_rtx (x
, op0_mode
, in_dest
, in_cond
);
5566 if (GET_CODE (x
) == code
)
5569 code
= GET_CODE (x
);
5571 /* We no longer know the original mode of operand 0 since we
5572 have changed the form of X) */
5573 op0_mode
= VOIDmode
;
5579 /* If X is a commutative operation whose operands are not in the canonical
5580 order, use substitutions to swap them. */
5583 maybe_swap_commutative_operands (rtx x
)
5585 if (COMMUTATIVE_ARITH_P (x
)
5586 && swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
5588 rtx temp
= XEXP (x
, 0);
5589 SUBST (XEXP (x
, 0), XEXP (x
, 1));
5590 SUBST (XEXP (x
, 1), temp
);
5594 /* Simplify X, a piece of RTL. We just operate on the expression at the
5595 outer level; call `subst' to simplify recursively. Return the new
5598 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5599 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5603 combine_simplify_rtx (rtx x
, machine_mode op0_mode
, int in_dest
,
5606 enum rtx_code code
= GET_CODE (x
);
5607 machine_mode mode
= GET_MODE (x
);
5608 scalar_int_mode int_mode
;
5612 /* If this is a commutative operation, put a constant last and a complex
5613 expression first. We don't need to do this for comparisons here. */
5614 maybe_swap_commutative_operands (x
);
  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }
  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */
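  /* For example, (plus:SI (if_then_else:SI C A B) (const_int 4)) can be
     rewritten as (if_then_else:SI C (plus:SI A (const_int 4))
     (plus:SI B (const_int 4))), whose arms may then simplify further.  */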
  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  && cond != x
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
	  /* Similarly, if we end up with one of the expressions the same
	     as the original, it is certainly not simpler.  */
	  && ! rtx_equal_p (x, true_rtx)
	  && ! rtx_equal_p (x, false_rtx))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an
	     if_then_else is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
				       (cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
				       (cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }
  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }
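  /* The inverse distributive law rewrites, for example,
     (ior:SI (and:SI A C) (and:SI B C)) as (and:SI (ior:SI A B) C).  */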
  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
	{
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);
	  rtx inner;

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	    std::swap (inner_op0, inner_op1);
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	    {
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
						 inner_op1);
	    }

	  if (inner)
	    return simplify_gen_binary (code, mode, other, inner);
	}
    }
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;
    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
	  /* Don't call gen_lowpart if the inner mode
	     is VOIDmode and we cannot simplify it, as SUBREG without
	     inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;
      {
	rtx temp;
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;

	/* If op is known to have all lower bits zero, the result is zero.  */
	scalar_int_mode int_mode, int_op0_mode;
	if (!in_dest
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
			 SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
	    && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
		 & GET_MODE_MASK (int_mode)) == 0)
	    && !side_effects_p (SUBREG_REG (x)))
	  return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, int_mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
				   GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (HWI_COMPUTABLE_MODE_P (mode))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      {
	poly_int64 c;
	if (poly_int_rtx_p (XEXP (x, 0), &c))
	  return gen_int_mode (c, mode);
      }

      /* Similarly to what we do in simplify-rtx.cc, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp)
	  && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (XEXP (x, 0))))
	return gen_lowpart (mode, XEXP (x, 0));
      break;
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;

    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (int_mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && known_eq ((GET_MODE_PRECISION
				(GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
			       (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 gen_rtx_XOR (int_mode, XEXP (x, 0),
					      const1_rtx),
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);

      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
	    return temp;
	}
      /* Canonicalize x + x into x << 1.  */
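      /* E.g. (plus:SI (reg:SI 100) (reg:SI 100)) becomes
	 (ashift:SI (reg:SI 100) (const_int 1)).  */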
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
	  && !side_effects_p (XEXP (x, 0)))
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;

    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
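      /* E.g. for pow2 == 8, (minus:SI X (and:SI X (const_int -8))) becomes
	 (and:SI X (const_int 7)), keeping only the low-order three bits
	 of X.  */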
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;

    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try to simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem,
					XEXP (XEXP (x, 0), 1));
	}
      break;
    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
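      /* E.g. (udiv:SI (ashift:SI X (const_int 3)) (const_int 4)) becomes
	 (lshiftrt:SI (ashift:SI X (const_int 3)) (const_int 2)).  */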
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
				     XEXP (x, 0), i);
      break;
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC)
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */
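	  /* For example, with STORE_FLAG_VALUE == 1, (ne:SI X (const_int 0))
	     where only the low-order bit of X can be nonzero simplifies to
	     X itself, and the corresponding (eq:SI X (const_int 0)) becomes
	     (xor:SI X (const_int 1)).  */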
	  if (in_cond)
	    ;

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    return gen_lowpart (int_mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, int_mode,
					  gen_lowpart (int_mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    return gen_lowpart (int_mode, expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode,
				    gen_lowpart (int_mode, op0), -1);
	    }

	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE
	      && is_int_mode (mode, &int_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode)
	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	      && op1 == const0_rtx
	      && int_mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (int_mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }

	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;
    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      (HOST_WIDE_INT_1U
			       << exact_log2 (GET_MODE_UNIT_BITSIZE
					      (GET_MODE (x))))
			      - 1, 0));
      break;

    case VEC_SELECT:
      {
	rtx trueop0 = XEXP (x, 0);
	mode = GET_MODE (trueop0);
	rtx trueop1 = XEXP (x, 1);
	/* If we select a low-part subreg, return that.  */
	if (vec_series_lowpart_p (GET_MODE (x), mode, trueop1))
	  {
	    rtx new_rtx = lowpart_subreg (GET_MODE (x), trueop0, mode);
	    if (new_rtx != NULL_RTX)
	      return new_rtx;
	  }
      }
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode, inner_mode;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  std::swap (true_rtx, false_rtx);
	}

      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
	{
	  /* If we are comparing against zero and the expression being
	     tested has only a single bit that might be nonzero, that is
	     its value when it is not equal to zero.  Similarly if it is
	     known to be -1 or 0.  */
	  if (true_code == EQ
	      && true_val == const0_rtx
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
	    {
	      false_code = EQ;
	      false_val = gen_int_mode (nzb, from_mode);
	    }
	  else if (true_code == EQ
		   && true_val == const0_rtx
		   && (num_sign_bit_copies (from, from_mode)
		       == GET_MODE_PRECISION (from_mode)))
	    {
	      false_code = EQ;
	      false_val = constm1_rtx;
	    }
	}

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG
	      && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }
  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;
  /* Look for cases where we have (abs x) or (neg (abs X)).  */
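  /* E.g. (if_then_else (gt X (const_int 0)) X (neg X)) becomes (abs X),
     while (if_then_else (lt X (const_int 0)) X (neg X)) becomes
     (neg (abs X)).  */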
  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }
  /* Look for MIN or MAX.  */
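  /* E.g. (if_then_else (ge A B) A B) becomes (smax A B), and
     (if_then_else (ltu A B) A B) becomes (umin A B).  */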
  if ((! FLOAT_MODE_P (mode)
       || (flag_unsafe_math_optimizations
	   && !HONOR_NANS (mode)
	   && !HONOR_SIGNED_ZEROS (mode)))
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 nor -1, but it isn't worth checking for.  */
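  /* For example, with STORE_FLAG_VALUE == 1,
     (if_then_else (ne A 0) (plus Z (const_int 4)) Z) can be rewritten as
     (plus Z (mult (ne A 0) (const_int 4))).  */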
  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      scalar_int_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}

      if (z)
	{
	  machine_mode cm = m;
	  if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
	      && GET_MODE (c1) != VOIDmode)
	    cm = GET_MODE (c1);
	  temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (MULT, cm, temp,
				      simplify_gen_binary (MULT, cm, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);

	  return temp;
	}
    }
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
			    gen_lowpart (int_mode, XEXP (cond, 0)), i);
  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
      && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
	 == nonzero_bits (XEXP (cond, 0), inner_mode)
      && (i = exact_log2 (UINTVAL (true_rtx)
			  & GET_MODE_MASK (int_mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      if (inner_mode == int_mode)
	return val;
      else if (GET_MODE_PRECISION (inner_mode)
	       < GET_MODE_PRECISION (int_mode))
	return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
    }

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }
  /* If the source is a COMPARE, look for the use of the comparison result
     and try to simplify it unless we already have used undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC || GET_CODE (src) == COMPARE)
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	{
	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
	    {
	      inner_compare = op0;
	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
	    }
	}
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}
      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else if (inner_compare
	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
	       && new_code == old_code
	       && op0 == XEXP (inner_compare, 0)
	       && op1 == XEXP (inner_compare, 1))
	compare_mode = GET_MODE (inner_compare);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is the only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif  /* SELECT_CC_MODE */
      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, if we didn't previously have the same COMPARE we
	 want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits
     will be undefined.  On machines where it is defined, this transformation
     is safe as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (known_equal_after_align_up
	  (GET_MODE_SIZE (GET_MODE (src)),
	   GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
	   UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
				       GET_MODE (SUBREG_REG (src)),
				       GET_MODE (src)))
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.
     Don't do this unless we have a scalar integer mode, otherwise the
     transformation is incorrect.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && SCALAR_INT_MODE_P (GET_MODE (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src))))
	 != UNKNOWN)
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */
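  /* For example, if A is known to be 0 or -1,
     (set D (if_then_else (ne A 0) B C)) can be rewritten as
     (set D (ior (and A B) (and (not A) C))).  */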
  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
	  || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
	  == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1),
	  false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0),
	  false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1),
	  true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0),
	  true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, int_mode,
				  simplify_gen_binary (IOR, int_mode,
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }
  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
	  && CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
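/* For example, on a target where SImode is 32 bits and QImode is 8 bits,
   (sign_extend:SI (reg:QI R)) expands to
   (ashiftrt:SI (ashift:SI (subreg:SI (reg:QI R) 0) (const_int 24))
		(const_int 24))
   (the subreg byte offset shown is for a little-endian target), and
   make_compound_operation performs the inverse rewrite.  */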
static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* FALLTHRU */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any
	 MEM because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded.  If not for that, MEM's would very rarely be safe.

	 Reject modes bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
	return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
	  || !CONST_INT_P (XEXP (x, 2)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail
	 otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }
  /* We've rejected non-scalar operations by now.  */
  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to the cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (mode)
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
	  == 0))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
	return temp;
      else
	return x;
    }
  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when MODE is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));
    }
  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */
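  /* For example, extracting an 8-bit field at bit 0 of a 32-bit value,
     (sign_extract:SI X (const_int 8) (const_int 0)) becomes an inner
     (ashift:SI ... (const_int 24)) followed by an outer
     (ashiftrt:SI ... (const_int 24)).  */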
  modewidth = GET_MODE_PRECISION (mode);
  if (modewidth >= pos + len)
    {
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
	return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
				  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
				  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    {
      tem = simplify_shift_const (NULL_RTX, LSHIFTRT, inner_mode,
				  XEXP (x, 0), pos);
      tem = gen_lowpart (mode, tem);
      if (!tem || GET_CODE (tem) == CLOBBER)
	return x;
      tem = simplify_and_const_int (NULL_RTX, mode, tem,
				    (HOST_WIDE_INT_1U << len) - 1);
    }
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */
static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len, inner_len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
	{
	  rtx x0 = XEXP (SET_DEST (x), 0);
	  if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
	    break;
	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
	  pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
			      MAX_MODE_INT);
	}
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
	{
	  inner = XEXP (SET_DEST (x), 0);
	  if (!GET_MODE_PRECISION (GET_MODE (inner))
	      .is_constant (&inner_len))
	    break;

	  len = INTVAL (XEXP (SET_DEST (x), 1));
	  pos = XEXP (SET_DEST (x), 2);

	  /* A constant position should stay within the width of INNER.  */
	  if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
	    break;

	  if (BITS_BIG_ENDIAN)
	    {
	      if (CONST_INT_P (pos))
		pos = GEN_INT (inner_len - len - INTVAL (pos));
	      else if (GET_CODE (pos) == MINUS
		       && CONST_INT_P (XEXP (pos, 1))
		       && INTVAL (XEXP (pos, 1)) == inner_len - len)
		/* If position is ADJUST - X, new position is X.  */
		pos = XEXP (pos, 0);
	      else
		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					   gen_int_mode (inner_len - len,
							 GET_MODE (pos)),
					   pos);
	    }
	}

      /* If the destination is a subreg that overwrites the whole of the inner
	 register, we can move the subreg to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
	       /* We need SUBREGs to compute nonzero_bits properly.  */
	       && nonzero_sign_valid
	       && !read_modify_subreg_p (SET_DEST (x)))
	{
	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
			   gen_lowpart
			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
			    SET_SRC (x)));
	  continue;
	}
      else
	break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
	inner = SUBREG_REG (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
	{
	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (GET_MODE (inner)))
	    break;

	  /* Try to find an integral mode to pun with.  */
	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
	      .exists (&compute_mode))
	    break;

	  inner = gen_lowpart (compute_mode, inner);
	}

      /* Compute a mask of LEN bits, if we can do this on the host
	 machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
	break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
	break;

      /* Now compute the equivalent expression.  Make a copy of INNER
	 for the SET_DEST in case it is a MEM into which we will substitute;
	 we don't want shared RTL in that case.  */
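      /* For example, storing an 8-bit field at constant bit position POS:
	 MASK is (const_int 255), CLEARED is INNER with the field zeroed out,
	 (and INNER (not (ashift MASK POS))), and MASKED is the source shifted
	 into place, (ashift (and SRC MASK) POS); the result IORs them.  */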
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
			   compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (
				      AND, compute_mode,
				      gen_lowpart (compute_mode, SET_SRC (x)),
				      mask),
				    pos);

      x = gen_rtx_SET (copy_rtx (inner),
		       simplify_gen_binary (IOR, compute_mode,
					    cleared, masked));
    }

  return x;
}
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */
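/* Illustrative example (not part of the original sources): a request for
   LEN == 8 bits at constant POS == 8 of a 32-bit register R, with
   UNSIGNEDP nonzero and IN_DEST == 0, would typically yield
	(zero_extract:SI (reg:SI R) (const_int 8) (const_int 8))
   while the same request with POS == 0 outside a COMPARE returns 0 below,
   since such an extract is better expressed as a plain AND.  */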
static rtx
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
		 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  scalar_int_mode wanted_inner_mode;
  scalar_int_mode wanted_inner_reg_mode = word_mode;
  scalar_int_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;
  if (GET_CODE (inner) == SUBREG
      && subreg_lowpart_p (inner)
      && (paradoxical_subreg_p (inner)
	  /* If trying or potentially trying to extract
	     bits outside of is_mode, don't look through
	     non-paradoxical SUBREGs.  See PR82192.  */
	  || (pos_rtx == NULL_RTX
	      && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
	 consider just the QI as the memory to extract from.
	 The subreg adds or removes high bits; its mode is
	 irrelevant to the meaning of this extraction,
	 since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
	is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
	   && CONST_INT_P (XEXP (inner, 1))
	   && pos_rtx == 0 && pos == 0
	   && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
	 (ashift X (const_int C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
				 0, 0, len - INTVAL (XEXP (inner, 1)),
				 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == MULT
	   && CONST_INT_P (XEXP (inner, 1))
	   && pos_rtx == 0 && pos == 0)
    {
      /* We're extracting the least significant bits of an rtx
	 (mult X (const_int 2^C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then multiply it by 2^C.  */
      const HOST_WIDE_INT shift_amt = exact_log2 (INTVAL (XEXP (inner, 1)));
      if (IN_RANGE (shift_amt, 1, len - 1))
	{
	  new_rtx = make_extraction (mode, XEXP (inner, 0),
				     0, 0, len - shift_amt,
				     unsignedp, in_dest, in_compare);
	  if (new_rtx)
	    return gen_rtx_MULT (mode, new_rtx, XEXP (inner, 1));
	}
    }
  else if (GET_CODE (inner) == TRUNCATE
	   /* If trying or potentially trying to extract
	      bits outside of is_mode, don't look through
	      TRUNCATE.  See PR82192.  */
	   && pos_rtx == NULL_RTX
	   && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
    inner = XEXP (inner, 0);
  inner_mode = GET_MODE (inner);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode.  For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */
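  /* For instance (illustrative): a LEN == 8, POS == 0 field of a suitably
     aligned (mem:SI ...) needs no extraction at all; the code below simply
     re-reads it as a (mem:QI ...) via adjust_address_nv.  */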
  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && !paradoxical_subreg_p (tmode, inner_mode)
	      && known_le (pos + len, GET_MODE_PRECISION (is_mode))
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0),
						  MEM_ADDR_SPACE (inner))
		      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
	 field.  If the original and current mode are the same, we need not
	 adjust the offset.  Otherwise, we do if bytes big endian.

	 If INNER is not a MEM, get a piece consisting of just the field
	 of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
	{
	  poly_int64 offset;

	  /* POS counts from lsb, but make OFFSET count in memory order.  */
	  if (BYTES_BIG_ENDIAN)
	    offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
					       - len - pos);
	  else
	    offset = pos / BITS_PER_UNIT;

	  new_rtx = adjust_address_nv (inner, tmode, offset);
	}
      else if (REG_P (inner))
	{
	  if (tmode != inner_mode)
	    {
	      /* We can't call gen_lowpart in a DEST since we
		 always want a SUBREG (see below) and it would sometimes
		 return a new hard register.  */
	      if (pos || in_dest)
		{
		  poly_uint64 offset
		    = subreg_offset_from_lsb (tmode, inner_mode, pos);

		  /* Avoid creating invalid subregs, for example when
		     simplifying (x>>32)&255.  */
		  if (!validate_subreg (tmode, inner_mode, inner, offset))
		    return NULL_RTX;

		  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
		}
	      else
		new_rtx = gen_lowpart (tmode, inner);
	    }
	  else
	    new_rtx = inner;
	}
      else
	new_rtx = force_to_mode (inner, tmode,
				 len >= HOST_BITS_PER_WIDE_INT
				 ? HOST_WIDE_INT_M1U
				 : (HOST_WIDE_INT_1U << len) - 1, 0);
      /* If this extraction is going into the destination of a SET,
	 make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
	return (MEM_P (new_rtx) ? new_rtx
		: (GET_CODE (new_rtx) != SUBREG
		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));

      if (mode == tmode)
	return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
					 mode, new_rtx, tmode);
      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of
	 sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (tmode)
	      && ((nonzero_bits (new_rtx, tmode)
		   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (tmode)) >> 1))
		  == 0)))
	{
	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
	    return temp;
	  return temp1;
	}

      /* Otherwise, sign- or zero-extend unless we already are in the
	 proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
			     mode, new_rtx));
    }
  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* If INNER is a MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
	  || (pos_rtx != 0 && len != 1)))
    return 0;
  enum extraction_pattern pattern = (in_dest ? EP_insv
				     : unsignedp ? EP_extzv : EP_extv);
  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  class extraction_insn insn;
  unsigned int inner_size;
  if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
      && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
    {
      wanted_inner_reg_mode = insn.struct_mode.require ();
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }
  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && partial_subreg_p (extraction_mode, mode))
    extraction_mode = mode;

  /* Punt if len is too large for extraction_mode.  */
  if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
    return NULL_RTX;
  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
	 natural alignment of the memory.  */
      wanted_inner_mode = smallest_int_mode_for_size (len);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
	     > GET_MODE_BITSIZE (wanted_inner_mode))
	wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
    }

  orig_pos = pos;
  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
	 BITS_BIG_ENDIAN style.  If position is constant, compute new
	 position.  Otherwise, build subtraction.
	 Note that POS is relative to the mode of the original argument.
	 If it's a MEM we need to recompute POS relative to that.
	 However, if we're extracting from (or inserting into) a register,
	 we want to recompute POS relative to wanted_inner_mode.  */
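      /* Worked example (illustrative): with a 32-bit wanted_inner_mode,
	 LEN == 8 and little-endian POS == 0, the conversion below yields
	 POS == 32 - 8 - 0 == 24.  */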
      int width;
      if (!MEM_P (inner))
	width = GET_MODE_BITSIZE (wanted_inner_mode);
      else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
	return NULL_RTX;

      if (pos_rtx == 0)
	pos = width - len - pos;
      else
	pos_rtx
	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
			   pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
	 Note that it can only be less than 0 if !MEM_P (inner).  */
    }
  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && partial_subreg_p (wanted_inner_mode, is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      poly_int64 offset = 0;

      /* The computations below will be correct if the machine is big
	 endian in both bits and bytes or little endian in bits and bytes.
	 If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
	 adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
	  && paradoxical_subreg_p (is_mode, inner_mode))
	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
		* GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
	  && is_mode != wanted_inner_mode)
	offset = (GET_MODE_SIZE (is_mode)
		  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }
  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitly truncating
	 the register unless TARGET_TRULY_NOOP_TRUNCATION.  */
      if (in_dest
	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
					     wanted_inner_mode))
	return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
	  && (pos_rtx != 0
	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
	return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
			     ? HOST_WIDE_INT_M1U
			     : (((HOST_WIDE_INT_1U << len) - 1)
				<< orig_pos),
			     0);
    }
  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.

     We dealt with constant rtxes earlier, so pos_rtx cannot
     have VOIDmode at this point.  */
  if (pos_rtx != 0
      && (GET_MODE_SIZE (pos_mode)
	  > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
				     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert extraction to cheaper one - either
	 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
	 cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
		   & ~(((unsigned HOST_WIDE_INT)
			GET_MODE_MASK (GET_MODE (pos_rtx)))
		       >> 1))
		  == 0)))
	{
	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
					  GET_MODE (pos_rtx));

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
	    temp = temp1;
	}
      pos_rtx = temp;
    }
  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
			     extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}

/* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
   can be commuted with any other operations in X.  Return X without
   that shift if so.  */
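/* Illustrative example (not from the original sources): with COUNT == 2,
	(plus (ashift X (const_int 3)) (const_int 12))
   commutes with a left shift by 2, so this function returns
	(plus (ashift X (const_int 1)) (const_int 3)).  */

static rtx
extract_left_shift (scalar_int_mode mode, rtx x, int count)
{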
  enum rtx_code code = GET_CODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
	  && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
	}
      break;

    default:
      break;
    }

  return 0;
}

/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */
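/* Illustrative example (not from the original sources): with
   IN_CODE == MEM, the ASHIFT case below rewrites
	(ashift X (const_int 2))    into    (mult X (const_int 4)),
   since multiplication is the canonical form for scaling inside
   addresses.  */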
static rtx
make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
			     enum rtx_code in_code,
			     enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  scalar_int_mode inner_mode;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }
  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	  if (GET_CODE (new_rtx) == NEG)
	    {
	      new_rtx = XEXP (new_rtx, 0);
	      multval = -multval;
	    }
	  multval = trunc_int_for_mode (multval, mode);
	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
	}
      break;
    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else if (GET_CODE (lhs) == MULT
	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  maybe_swap_commutative_operands (x);
	  return x;
	}
      break;
    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else if (GET_CODE (rhs) == MULT
	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  return x;
	}
    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, 1, 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
					  &inner_mode)
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, 1, 0, in_code == COMPARE);

	  /* If we narrowed the mode when dropping the subreg, then we lose.  */
	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
	    new_rtx = NULL;

	  /* If that didn't give anything, see if the AND simplifies on
	     its own.  */
	  if (!new_rtx && i >= 0)
	    {
	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
					 0, in_code == COMPARE);
	    }
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1)),
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
						 XEXP (x, 1)));
	  new_rtx = make_compound_operation (new_rtx, in_code);
	}

      /* If we have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, 1, 0, in_code == COMPARE);
	}
      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation (XEXP (XEXP (x,
									  0),
								    0),
							      next_code),
				     XEXP (XEXP (x, 0), 1)));
	}
      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, 1, 0, 1);
      /* If the one operand is a paradoxical subreg of a register or memory and
	 the constant (limited to the smaller mode) has only zero bits where
	 the sub expression has known zero bits, this can be expressed as
	 a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
	{
	  rtx sub;

	  sub = XEXP (XEXP (x, 0), 0);
	  machine_mode sub_mode = GET_MODE (sub);
	  int sub_width;
	  if ((REG_P (sub) || MEM_P (sub))
	      && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
	      && sub_width < mode_width)
	    {
	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
	      unsigned HOST_WIDE_INT mask;

	      /* original AND constant with all the known zero bits set */
	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
	      if ((mask & mode_mask) == mode_mask)
		{
		  new_rtx = make_compound_operation (sub, next_code);
		  new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
					     1, 0, in_code == COMPARE);
		}
	    }
	}

      break;
    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new_rtx = gen_rtx_ASHIFTRT (mode,
				      make_compound_operation (XEXP (x, 0),
							       next_code),
				      XEXP (x, 1));
	  break;
	}

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
	  && GET_CODE (lhs) == ASHIFT
	  && CONST_INT_P (XEXP (lhs, 1))
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
	  && INTVAL (XEXP (lhs, 1)) >= 0
	  && INTVAL (rhs) < mode_width)
	{
	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}
      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && CONST_INT_P (rhs)
	  && INTVAL (rhs) >= 0
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (rhs) < mode_width
	  && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
								  next_code),
				   0, NULL_RTX, mode_width - INTVAL (rhs),
				   code == LSHIFTRT, 0, in_code == COMPARE);

      break;
    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */
      {
	rtx inner = SUBREG_REG (x), simplified;
	enum rtx_code subreg_code = in_code;

	/* If the SUBREG is masking of a logical right shift,
	   make an extraction.  */
	if (GET_CODE (inner) == LSHIFTRT
	    && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
	    && CONST_INT_P (XEXP (inner, 1))
	    && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
	    && subreg_lowpart_p (x))
	  {
	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
	    int width = GET_MODE_PRECISION (inner_mode)
			- INTVAL (XEXP (inner, 1));
	    if (width > mode_width)
	      width = mode_width;
	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				       width, 1, 0, in_code == COMPARE);
	    break;
	  }

	/* If in_code is COMPARE, it isn't always safe to pass it through
	   to the recursive make_compound_operation call.  */
	if (subreg_code == COMPARE
	    && (!subreg_lowpart_p (x)
		|| GET_CODE (inner) == SUBREG
		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
		   is (const_int 0), rather than
		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
		   for non-equality comparisons against 0 is not equivalent
		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
		|| (GET_CODE (inner) == AND
		    && CONST_INT_P (XEXP (inner, 1))
		    && partial_subreg_p (x)
		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
		       >= GET_MODE_BITSIZE (mode) - 1)))
	  subreg_code = SET;

	tem = make_compound_operation (inner, subreg_code);

	simplified
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (inner)
	    && partial_subreg_p (x)
	    && subreg_lowpart_p (x))
	  {
	    rtx newer
	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    /* force_to_mode can expand compounds.  If it just re-expanded
	       the compound, use gen_lowpart to convert to the desired
	       mode.  */
	    if (rtx_equal_p (newer, x)
		/* Likewise if it re-expanded the compound only partially.
		   This happens for SUBREG of ZERO_EXTRACT if they extract
		   the same number of bits.  */
		|| (GET_CODE (newer) == SUBREG
		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
		    && GET_CODE (inner) == AND
		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
	      return gen_lowpart (GET_MODE (x), tem);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }
  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}

/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */
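/* Illustrative example (not from the original sources): with IN_CODE == SET,
	(and (lshiftrt (reg:SI R) (const_int 24)) (const_int 255))
   is recognized as the compound operation
	(zero_extract:SI (reg:SI R) (const_int 8) (const_int 24)),
   undoing the shift-and-mask canonicalization done elsewhere.  */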
static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx tem, new_rtx;
  enum rtx_code next_code;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE || in_code == EQ ? SET
	       : in_code);

  scalar_int_mode mode;
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
    {
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
						 &next_code);
      if (new_rtx)
	return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
					    new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
	return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	{
	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
	  SUBST (XVECEXP (x, i, j), new_rtx);
	}

  maybe_swap_commutative_operands (x);
  return x;
}

/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */
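/* Worked example: M == 0x0ff0 selects the 8-bit field that starts at bit 4,
   so the return value is 4 and *PLEN is set to 8; M == 0x0ff1 has no
   contiguous field and yields -1.  */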
static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}

/* If X refers to a register that equals REG in value, replace these
   references with REG.  */
static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  bool copied = false;
  const char *fmt;
  int i;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}

/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */
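/* For instance (illustrative): narrowing a DImode register already known
   to be truncated to SImode just takes the lowpart subreg; otherwise an
   explicit (truncate:SI ...) is generated below.  */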
static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && partial_subreg_p (mode, GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}

/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */
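/* Illustrative example (not from the original sources): calling this on
	(and (reg:SI R) (const_int 0xff))
   with MASK == 0x0f folds the AND constant to 0xff & 0x0f == 0x0f; since
   that equals MASK the now-redundant AND is deleted and only R, converted
   to MODE, is returned.  */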
static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT nonzero;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && partial_subreg_p (mode, GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode != VOIDmode)
    mask &= GET_MODE_MASK (op_mode);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && (partial_subreg_p (x)
	  || (mask
	      & GET_MODE_MASK (GET_MODE (x))
	      & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  scalar_int_mode int_mode, xmode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    /* OP_MODE is either MODE or XMODE, so it must be a scalar
       integer too.  */
    return force_int_to_mode (x, int_mode, xmode,
			      as_a <scalar_int_mode> (op_mode),
			      mask, just_select);

  return gen_lowpart_or_truncate (mode, x);
}

/* Subroutine of force_to_mode that handles cases in which both X and
   the result are scalar integers.  MODE is the mode of the result,
   XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
   is preferred for simplified versions of X.  The other arguments
   are as for force_to_mode.  */
static rtx
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
		   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
		   int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  unsigned HOST_WIDE_INT fuller_mask;
  rtx op0, op1, temp;
  poly_int64 const_op0;

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
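  /* Worked example: for MASK == 0x14 the highest set bit is bit 4, so
     FULLER_MASK becomes (1 << 5) - 1 == 0x1f.  */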
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);
  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);
    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));
	  xmode = op_mode;

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (xmode) != mask
	      && HWI_COMPUTABLE_MODE_P (xmode))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
				       gen_int_mode (cval, xmode));
	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;
    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */
    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);
      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
	  && known_alignment (poly_uint64 (const_op0)) > mask)
	{
	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;
    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (xmode))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       xmode);
	  temp = simplify_gen_binary (GET_CODE (x), xmode,
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* fall through */
    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	{
	  x = simplify_gen_binary (code, op_mode, op0, op1);
	  xmode = op_mode;
	}
      break;
    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode,
						    mask, next_select));

      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
	  xmode = op_mode;
	}
      break;
    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
	    op_mode = xmode;

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (xmode != op_mode || inner != XEXP (x, 0))
	    {
	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	      xmode = op_mode;
	    }
	}
      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (xmode))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (xmode))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	{
	  int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
	  x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
				   gen_int_shift_amount (xmode, nbits));
	}
      break;
    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (xmode, mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT nonzero;
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (xmode);
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (xmode);
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
		   GET_MODE_PRECISION (xmode) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}
      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;
    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    xmode, gen_int_mode (mask, xmode),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, xmode,
				     force_to_mode (XEXP (x, 0), xmode,
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;
    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;
    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (xmode))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode,
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}
      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
	  xmode = op_mode;
	}
      break;
    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;
    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}

/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */
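/* Illustrative example (not from the original sources): for
	X == (if_then_else (ne (reg A) (const_int 0)) (reg B) (reg C))
   the function returns (reg A), with *PTRUE == (reg B) and
   *PFALSE == (reg C); the NE-against-zero test is stripped by the
   canonicalization below.  */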
static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;
  scalar_int_mode int_mode;
  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }
  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
				    GET_MODE (XEXP (x, 0)));
      return cond0;
    }
  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);
      cond0 = if_then_else_cond (op0, &true0, &false0);
      cond1 = if_then_else_cond (op1, &true1, &false1);

      if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
	  && (REG_P (op0) || REG_P (op1)))
	{
	  /* Try to enable a simplification by undoing work done by
	     if_then_else_cond if it converted a REG into something more
	     complex.  */
	  if (REG_P (op0))
	    {
	      cond0 = 0;
	      true0 = false0 = op0;
	    }
	  else
	    {
	      cond1 = 0;
	      true1 = false1 = op1;
	    }
	}

      if ((cond0 != 0 || cond1 != 0)
	  && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
	{
	  /* If if_then_else_cond returned zero, then true/false are the
	     same rtl.  We must copy one of them to prevent invalid rtl
	     sharing.  */
	  if (cond0 == 0)
	    true0 = copy_rtx (true0);
	  else if (cond1 == 0)
	    true1 = copy_rtx (true1);

	  if (COMPARISON_P (x))
	    {
	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
						true0, true1);
	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
						 false0, false1);
	    }
	  else
	    {
	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
	    }

	  return cond0 ? cond0 : cond1;
	}
      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
	 operands is zero when the other is nonzero, and vice-versa,
	 and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
	      || code == UMAX)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 1);
	  rtx op1 = XEXP (XEXP (x, 1), 1);

	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && SCALAR_INT_MODE_P (mode)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     (code == MINUS
					      ? simplify_gen_unary (NEG, mode,
								    op1, mode)
					      : op1),
					     const_true_rtx);
	      return cond0;
	    }
	}
      /* Similarly for MULT, AND and UMIN, except that for these the result
	 is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == MULT || code == AND || code == UMIN)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = *pfalse = const0_rtx;
	      return cond0;
	    }
	}
    }
9311 else if (code
== IF_THEN_ELSE
)
9313 /* If we have IF_THEN_ELSE already, extract the condition and
9314 canonicalize it if it is NE or EQ. */
9315 cond0
= XEXP (x
, 0);
9316 *ptrue
= XEXP (x
, 1), *pfalse
= XEXP (x
, 2);
9317 if (GET_CODE (cond0
) == NE
&& XEXP (cond0
, 1) == const0_rtx
)
9318 return XEXP (cond0
, 0);
9319 else if (GET_CODE (cond0
) == EQ
&& XEXP (cond0
, 1) == const0_rtx
)
9321 *ptrue
= XEXP (x
, 2), *pfalse
= XEXP (x
, 1);
9322 return XEXP (cond0
, 0);
9328 /* If X is a SUBREG, we can narrow both the true and false values
9329 if the inner expression, if there is a condition. */
9330 else if (code
== SUBREG
9331 && (cond0
= if_then_else_cond (SUBREG_REG (x
), &true0
,
9334 true0
= simplify_gen_subreg (mode
, true0
,
9335 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
9336 false0
= simplify_gen_subreg (mode
, false0
,
9337 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
9338 if (true0
&& false0
)
9346 /* If X is a constant, this isn't special and will cause confusions
9347 if we treat it as such. Likewise if it is equivalent to a constant. */
9348 else if (CONSTANT_P (x
)
9349 || ((cond0
= get_last_value (x
)) != 0 && CONSTANT_P (cond0
)))
9352 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9353 will be least confusing to the rest of the compiler. */
9354 else if (mode
== BImode
)
9356 *ptrue
= GEN_INT (STORE_FLAG_VALUE
), *pfalse
= const0_rtx
;
9360 /* If X is known to be either 0 or -1, those are the true and
9361 false values when testing X. */
9362 else if (x
== constm1_rtx
|| x
== const0_rtx
9363 || (is_a
<scalar_int_mode
> (mode
, &int_mode
)
9364 && (num_sign_bit_copies (x
, int_mode
)
9365 == GET_MODE_PRECISION (int_mode
))))
9367 *ptrue
= constm1_rtx
, *pfalse
= const0_rtx
;
9371 /* Likewise for 0 or a single bit. */
9372 else if (HWI_COMPUTABLE_MODE_P (mode
)
9373 && pow2p_hwi (nz
= nonzero_bits (x
, mode
)))
9375 *ptrue
= gen_int_mode (nz
, mode
), *pfalse
= const0_rtx
;
9379 /* Otherwise fail; show no condition with true and false values the same. */
9380 *ptrue
= *pfalse
= x
;
9384 /* Return the value of expression X given the fact that condition COND
9385 is known to be true when applied to REG as its first operand and VAL
9386 as its second. X is known to not be shared and so can be modified in
9389 We only handle the simplest cases, and specifically those cases that
9390 arise with IF_THEN_ELSE expressions. */
9393 known_cond (rtx x
, enum rtx_code cond
, rtx reg
, rtx val
)
9395 enum rtx_code code
= GET_CODE (x
);
9399 if (side_effects_p (x
))
9402 /* If either operand of the condition is a floating point value,
9403 then we have to avoid collapsing an EQ comparison. */
9405 && rtx_equal_p (x
, reg
)
9406 && ! FLOAT_MODE_P (GET_MODE (x
))
9407 && ! FLOAT_MODE_P (GET_MODE (val
)))
9410 if (cond
== UNEQ
&& rtx_equal_p (x
, reg
))
9413 /* If X is (abs REG) and we know something about REG's relationship
9414 with zero, we may be able to simplify this. */
9416 if (code
== ABS
&& rtx_equal_p (XEXP (x
, 0), reg
) && val
== const0_rtx
)
9419 case GE
: case GT
: case EQ
:
9422 return simplify_gen_unary (NEG
, GET_MODE (XEXP (x
, 0)),
9424 GET_MODE (XEXP (x
, 0)));
9429 /* The only other cases we handle are MIN, MAX, and comparisons if the
9430 operands are the same as REG and VAL. */
9432 else if (COMPARISON_P (x
) || COMMUTATIVE_ARITH_P (x
))
9434 if (rtx_equal_p (XEXP (x
, 0), val
))
9436 std::swap (val
, reg
);
9437 cond
= swap_condition (cond
);
9440 if (rtx_equal_p (XEXP (x
, 0), reg
) && rtx_equal_p (XEXP (x
, 1), val
))
9442 if (COMPARISON_P (x
))
9444 if (comparison_dominates_p (cond
, code
))
9445 return VECTOR_MODE_P (GET_MODE (x
)) ? x
: const_true_rtx
;
9447 code
= reversed_comparison_code (x
, NULL
);
9449 && comparison_dominates_p (cond
, code
))
9450 return CONST0_RTX (GET_MODE (x
));
9454 else if (code
== SMAX
|| code
== SMIN
9455 || code
== UMIN
|| code
== UMAX
)
9457 int unsignedp
= (code
== UMIN
|| code
== UMAX
);
9459 /* Do not reverse the condition when it is NE or EQ.
9460 This is because we cannot conclude anything about
9461 the value of 'SMAX (x, y)' when x is not equal to y,
9462 but we can when x equals y. */
9463 if ((code
== SMAX
|| code
== UMAX
)
9464 && ! (cond
== EQ
|| cond
== NE
))
9465 cond
= reverse_condition (cond
);
9470 return unsignedp
? x
: XEXP (x
, 1);
9472 return unsignedp
? x
: XEXP (x
, 0);
9474 return unsignedp
? XEXP (x
, 1) : x
;
9476 return unsignedp
? XEXP (x
, 0) : x
;
9483 else if (code
== SUBREG
)
9485 machine_mode inner_mode
= GET_MODE (SUBREG_REG (x
));
9486 rtx new_rtx
, r
= known_cond (SUBREG_REG (x
), cond
, reg
, val
);
9488 if (SUBREG_REG (x
) != r
)
9490 /* We must simplify subreg here, before we lose track of the
9491 original inner_mode. */
9492 new_rtx
= simplify_subreg (GET_MODE (x
), r
,
9493 inner_mode
, SUBREG_BYTE (x
));
9497 SUBST (SUBREG_REG (x
), r
);
9502 /* We don't have to handle SIGN_EXTEND here, because even in the
9503 case of replacing something with a modeless CONST_INT, a
9504 CONST_INT is already (supposed to be) a valid sign extension for
9505 its narrower mode, which implies it's already properly
9506 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9507 story is different. */
9508 else if (code
== ZERO_EXTEND
)
9510 machine_mode inner_mode
= GET_MODE (XEXP (x
, 0));
9511 rtx new_rtx
, r
= known_cond (XEXP (x
, 0), cond
, reg
, val
);
9513 if (XEXP (x
, 0) != r
)
9515 /* We must simplify the zero_extend here, before we lose
9516 track of the original inner_mode. */
9517 new_rtx
= simplify_unary_operation (ZERO_EXTEND
, GET_MODE (x
),
9522 SUBST (XEXP (x
, 0), r
);
9528 fmt
= GET_RTX_FORMAT (code
);
9529 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
9532 SUBST (XEXP (x
, i
), known_cond (XEXP (x
, i
), cond
, reg
, val
));
9533 else if (fmt
[i
] == 'E')
9534 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
9535 SUBST (XVECEXP (x
, i
, j
), known_cond (XVECEXP (x
, i
, j
),
9542 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9543 assignment as a field assignment. */
9546 rtx_equal_for_field_assignment_p (rtx x
, rtx y
, bool widen_x
)
9548 if (widen_x
&& GET_MODE (x
) != GET_MODE (y
))
9550 if (paradoxical_subreg_p (GET_MODE (x
), GET_MODE (y
)))
9552 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
9554 x
= adjust_address_nv (x
, GET_MODE (y
),
9555 byte_lowpart_offset (GET_MODE (y
),
9559 if (x
== y
|| rtx_equal_p (x
, y
))
9562 if (x
== 0 || y
== 0 || GET_MODE (x
) != GET_MODE (y
))
9565 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9566 Note that all SUBREGs of MEM are paradoxical; otherwise they
9567 would have been rewritten. */
9568 if (MEM_P (x
) && GET_CODE (y
) == SUBREG
9569 && MEM_P (SUBREG_REG (y
))
9570 && rtx_equal_p (SUBREG_REG (y
),
9571 gen_lowpart (GET_MODE (SUBREG_REG (y
)), x
)))
9574 if (MEM_P (y
) && GET_CODE (x
) == SUBREG
9575 && MEM_P (SUBREG_REG (x
))
9576 && rtx_equal_p (SUBREG_REG (x
),
9577 gen_lowpart (GET_MODE (SUBREG_REG (x
)), y
)))
9580 /* We used to see if get_last_value of X and Y were the same but that's
9581 not correct. In one direction, we'll cause the assignment to have
9582 the wrong destination and in the case, we'll import a register into this
9583 insn that might have already have been dead. So fail if none of the
9584 above cases are true. */
9588 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9589 Return that assignment if so.
9591 We only handle the most common cases. */
9594 make_field_assignment (rtx x
)
9596 rtx dest
= SET_DEST (x
);
9597 rtx src
= SET_SRC (x
);
9602 unsigned HOST_WIDE_INT len
;
9605 /* All the rules in this function are specific to scalar integers. */
9606 scalar_int_mode mode
;
9607 if (!is_a
<scalar_int_mode
> (GET_MODE (dest
), &mode
))
9610 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9611 a clear of a one-bit field. We will have changed it to
9612 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9615 if (GET_CODE (src
) == AND
&& GET_CODE (XEXP (src
, 0)) == ROTATE
9616 && CONST_INT_P (XEXP (XEXP (src
, 0), 0))
9617 && INTVAL (XEXP (XEXP (src
, 0), 0)) == -2
9618 && rtx_equal_for_field_assignment_p (dest
, XEXP (src
, 1)))
9620 assign
= make_extraction (VOIDmode
, dest
, 0, XEXP (XEXP (src
, 0), 1),
9623 return gen_rtx_SET (assign
, const0_rtx
);
9627 if (GET_CODE (src
) == AND
&& GET_CODE (XEXP (src
, 0)) == SUBREG
9628 && subreg_lowpart_p (XEXP (src
, 0))
9629 && partial_subreg_p (XEXP (src
, 0))
9630 && GET_CODE (SUBREG_REG (XEXP (src
, 0))) == ROTATE
9631 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src
, 0)), 0))
9632 && INTVAL (XEXP (SUBREG_REG (XEXP (src
, 0)), 0)) == -2
9633 && rtx_equal_for_field_assignment_p (dest
, XEXP (src
, 1)))
9635 assign
= make_extraction (VOIDmode
, dest
, 0,
9636 XEXP (SUBREG_REG (XEXP (src
, 0)), 1),
9639 return gen_rtx_SET (assign
, const0_rtx
);
9643 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9645 if (GET_CODE (src
) == IOR
&& GET_CODE (XEXP (src
, 0)) == ASHIFT
9646 && XEXP (XEXP (src
, 0), 0) == const1_rtx
9647 && rtx_equal_for_field_assignment_p (dest
, XEXP (src
, 1)))
9649 assign
= make_extraction (VOIDmode
, dest
, 0, XEXP (XEXP (src
, 0), 1),
9652 return gen_rtx_SET (assign
, const1_rtx
);
9656 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9657 SRC is an AND with all bits of that field set, then we can discard
9659 if (GET_CODE (dest
) == ZERO_EXTRACT
9660 && CONST_INT_P (XEXP (dest
, 1))
9661 && GET_CODE (src
) == AND
9662 && CONST_INT_P (XEXP (src
, 1)))
9664 HOST_WIDE_INT width
= INTVAL (XEXP (dest
, 1));
9665 unsigned HOST_WIDE_INT and_mask
= INTVAL (XEXP (src
, 1));
9666 unsigned HOST_WIDE_INT ze_mask
;
9668 if (width
>= HOST_BITS_PER_WIDE_INT
)
9671 ze_mask
= ((unsigned HOST_WIDE_INT
)1 << width
) - 1;
9673 /* Complete overlap. We can remove the source AND. */
9674 if ((and_mask
& ze_mask
) == ze_mask
)
9675 return gen_rtx_SET (dest
, XEXP (src
, 0));
9677 /* Partial overlap. We can reduce the source AND. */
9678 if ((and_mask
& ze_mask
) != and_mask
)
9680 src
= gen_rtx_AND (mode
, XEXP (src
, 0),
9681 gen_int_mode (and_mask
& ze_mask
, mode
));
9682 return gen_rtx_SET (dest
, src
);
9686 /* The other case we handle is assignments into a constant-position
9687 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9688 a mask that has all one bits except for a group of zero bits and
9689 OTHER is known to have zeros where C1 has ones, this is such an
9690 assignment. Compute the position and length from C1. Shift OTHER
9691 to the appropriate position, force it to the required mode, and
9692 make the extraction. Check for the AND in both operands. */
9694 /* One or more SUBREGs might obscure the constant-position field
9695 assignment. The first one we are likely to encounter is an outer
9696 narrowing SUBREG, which we can just strip for the purposes of
9697 identifying the constant-field assignment. */
9698 scalar_int_mode src_mode
= mode
;
9699 if (GET_CODE (src
) == SUBREG
9700 && subreg_lowpart_p (src
)
9701 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (src
)), &src_mode
))
9702 src
= SUBREG_REG (src
);
9704 if (GET_CODE (src
) != IOR
&& GET_CODE (src
) != XOR
)
9707 rhs
= expand_compound_operation (XEXP (src
, 0));
9708 lhs
= expand_compound_operation (XEXP (src
, 1));
9710 if (GET_CODE (rhs
) == AND
9711 && CONST_INT_P (XEXP (rhs
, 1))
9712 && rtx_equal_for_field_assignment_p (XEXP (rhs
, 0), dest
))
9713 c1
= INTVAL (XEXP (rhs
, 1)), other
= lhs
;
9714 /* The second SUBREG that might get in the way is a paradoxical
9715 SUBREG around the first operand of the AND. We want to
9716 pretend the operand is as wide as the destination here. We
9717 do this by adjusting the MEM to wider mode for the sole
9718 purpose of the call to rtx_equal_for_field_assignment_p. Also
9719 note this trick only works for MEMs. */
9720 else if (GET_CODE (rhs
) == AND
9721 && paradoxical_subreg_p (XEXP (rhs
, 0))
9722 && MEM_P (SUBREG_REG (XEXP (rhs
, 0)))
9723 && CONST_INT_P (XEXP (rhs
, 1))
9724 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs
, 0)),
9726 c1
= INTVAL (XEXP (rhs
, 1)), other
= lhs
;
9727 else if (GET_CODE (lhs
) == AND
9728 && CONST_INT_P (XEXP (lhs
, 1))
9729 && rtx_equal_for_field_assignment_p (XEXP (lhs
, 0), dest
))
9730 c1
= INTVAL (XEXP (lhs
, 1)), other
= rhs
;
9731 /* The second SUBREG that might get in the way is a paradoxical
9732 SUBREG around the first operand of the AND. We want to
9733 pretend the operand is as wide as the destination here. We
9734 do this by adjusting the MEM to wider mode for the sole
9735 purpose of the call to rtx_equal_for_field_assignment_p. Also
9736 note this trick only works for MEMs. */
9737 else if (GET_CODE (lhs
) == AND
9738 && paradoxical_subreg_p (XEXP (lhs
, 0))
9739 && MEM_P (SUBREG_REG (XEXP (lhs
, 0)))
9740 && CONST_INT_P (XEXP (lhs
, 1))
9741 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs
, 0)),
9743 c1
= INTVAL (XEXP (lhs
, 1)), other
= rhs
;
9747 pos
= get_pos_from_mask ((~c1
) & GET_MODE_MASK (mode
), &len
);
9749 || pos
+ len
> GET_MODE_PRECISION (mode
)
9750 || GET_MODE_PRECISION (mode
) > HOST_BITS_PER_WIDE_INT
9751 || (c1
& nonzero_bits (other
, mode
)) != 0)
9754 assign
= make_extraction (VOIDmode
, dest
, pos
, NULL_RTX
, len
, 1, 1, 0);
9758 /* The mode to use for the source is the mode of the assignment, or of
9759 what is inside a possible STRICT_LOW_PART. */
9760 machine_mode new_mode
= (GET_CODE (assign
) == STRICT_LOW_PART
9761 ? GET_MODE (XEXP (assign
, 0)) : GET_MODE (assign
));
9763 /* Shift OTHER right POS places and make it the source, restricting it
9764 to the proper length and mode. */
9766 src
= canon_reg_for_combine (simplify_shift_const (NULL_RTX
, LSHIFTRT
,
9767 src_mode
, other
, pos
),
9769 src
= force_to_mode (src
, new_mode
,
9770 len
>= HOST_BITS_PER_WIDE_INT
9772 : (HOST_WIDE_INT_1U
<< len
) - 1,
9775 /* If SRC is masked by an AND that does not make a difference in
9776 the value being stored, strip it. */
9777 if (GET_CODE (assign
) == ZERO_EXTRACT
9778 && CONST_INT_P (XEXP (assign
, 1))
9779 && INTVAL (XEXP (assign
, 1)) < HOST_BITS_PER_WIDE_INT
9780 && GET_CODE (src
) == AND
9781 && CONST_INT_P (XEXP (src
, 1))
9782 && UINTVAL (XEXP (src
, 1))
9783 == (HOST_WIDE_INT_1U
<< INTVAL (XEXP (assign
, 1))) - 1)
9784 src
= XEXP (src
, 0);
9786 return gen_rtx_SET (assign
, src
);
9789 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9793 apply_distributive_law (rtx x
)
9795 enum rtx_code code
= GET_CODE (x
);
9796 enum rtx_code inner_code
;
9797 rtx lhs
, rhs
, other
;
9800 /* Distributivity is not true for floating point as it can change the
9801 value. So we don't do it unless -funsafe-math-optimizations. */
9802 if (FLOAT_MODE_P (GET_MODE (x
))
9803 && ! flag_unsafe_math_optimizations
)
9806 /* The outer operation can only be one of the following: */
9807 if (code
!= IOR
&& code
!= AND
&& code
!= XOR
9808 && code
!= PLUS
&& code
!= MINUS
)
9814 /* If either operand is a primitive we can't do anything, so get out
9816 if (OBJECT_P (lhs
) || OBJECT_P (rhs
))
9819 lhs
= expand_compound_operation (lhs
);
9820 rhs
= expand_compound_operation (rhs
);
9821 inner_code
= GET_CODE (lhs
);
9822 if (inner_code
!= GET_CODE (rhs
))
9825 /* See if the inner and outer operations distribute. */
9832 /* These all distribute except over PLUS. */
9833 if (code
== PLUS
|| code
== MINUS
)
9838 if (code
!= PLUS
&& code
!= MINUS
)
9843 /* This is also a multiply, so it distributes over everything. */
9846 /* This used to handle SUBREG, but this turned out to be counter-
9847 productive, since (subreg (op ...)) usually is not handled by
9848 insn patterns, and this "optimization" therefore transformed
9849 recognizable patterns into unrecognizable ones. Therefore the
9850 SUBREG case was removed from here.
9852 It is possible that distributing SUBREG over arithmetic operations
9853 leads to an intermediate result than can then be optimized further,
9854 e.g. by moving the outer SUBREG to the other side of a SET as done
9855 in simplify_set. This seems to have been the original intent of
9856 handling SUBREGs here.
9858 However, with current GCC this does not appear to actually happen,
9859 at least on major platforms. If some case is found where removing
9860 the SUBREG case here prevents follow-on optimizations, distributing
9861 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9867 /* Set LHS and RHS to the inner operands (A and B in the example
9868 above) and set OTHER to the common operand (C in the example).
9869 There is only one way to do this unless the inner operation is
9871 if (COMMUTATIVE_ARITH_P (lhs
)
9872 && rtx_equal_p (XEXP (lhs
, 0), XEXP (rhs
, 0)))
9873 other
= XEXP (lhs
, 0), lhs
= XEXP (lhs
, 1), rhs
= XEXP (rhs
, 1);
9874 else if (COMMUTATIVE_ARITH_P (lhs
)
9875 && rtx_equal_p (XEXP (lhs
, 0), XEXP (rhs
, 1)))
9876 other
= XEXP (lhs
, 0), lhs
= XEXP (lhs
, 1), rhs
= XEXP (rhs
, 0);
9877 else if (COMMUTATIVE_ARITH_P (lhs
)
9878 && rtx_equal_p (XEXP (lhs
, 1), XEXP (rhs
, 0)))
9879 other
= XEXP (lhs
, 1), lhs
= XEXP (lhs
, 0), rhs
= XEXP (rhs
, 1);
9880 else if (rtx_equal_p (XEXP (lhs
, 1), XEXP (rhs
, 1)))
9881 other
= XEXP (lhs
, 1), lhs
= XEXP (lhs
, 0), rhs
= XEXP (rhs
, 0);
9885 /* Form the new inner operation, seeing if it simplifies first. */
9886 tem
= simplify_gen_binary (code
, GET_MODE (x
), lhs
, rhs
);
9888 /* There is one exception to the general way of distributing:
9889 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9890 if (code
== XOR
&& inner_code
== IOR
)
9893 other
= simplify_gen_unary (NOT
, GET_MODE (x
), other
, GET_MODE (x
));
9896 /* We may be able to continuing distributing the result, so call
9897 ourselves recursively on the inner operation before forming the
9898 outer operation, which we return. */
9899 return simplify_gen_binary (inner_code
, GET_MODE (x
),
9900 apply_distributive_law (tem
), other
);
9903 /* See if X is of the form (* (+ A B) C), and if so convert to
9904 (+ (* A C) (* B C)) and try to simplify.
9906 Most of the time, this results in no change. However, if some of
9907 the operands are the same or inverses of each other, simplifications
9910 For example, (and (ior A B) (not B)) can occur as the result of
9911 expanding a bit field assignment. When we apply the distributive
9912 law to this, we get (ior (and (A (not B))) (and (B (not B)))),
9913 which then simplifies to (and (A (not B))).
9915 Note that no checks happen on the validity of applying the inverse
9916 distributive law. This is pointless since we can do it in the
9917 few places where this routine is called.
9919 N is the index of the term that is decomposed (the arithmetic operation,
9920 i.e. (+ A B) in the first example above). !N is the index of the term that
9921 is distributed, i.e. of C in the first example above. */
9923 distribute_and_simplify_rtx (rtx x
, int n
)
9926 enum rtx_code outer_code
, inner_code
;
9927 rtx decomposed
, distributed
, inner_op0
, inner_op1
, new_op0
, new_op1
, tmp
;
9929 /* Distributivity is not true for floating point as it can change the
9930 value. So we don't do it unless -funsafe-math-optimizations. */
9931 if (FLOAT_MODE_P (GET_MODE (x
))
9932 && ! flag_unsafe_math_optimizations
)
9935 decomposed
= XEXP (x
, n
);
9936 if (!ARITHMETIC_P (decomposed
))
9939 mode
= GET_MODE (x
);
9940 outer_code
= GET_CODE (x
);
9941 distributed
= XEXP (x
, !n
);
9943 inner_code
= GET_CODE (decomposed
);
9944 inner_op0
= XEXP (decomposed
, 0);
9945 inner_op1
= XEXP (decomposed
, 1);
9947 /* Special case (and (xor B C) (not A)), which is equivalent to
9948 (xor (ior A B) (ior A C)) */
9949 if (outer_code
== AND
&& inner_code
== XOR
&& GET_CODE (distributed
) == NOT
)
9951 distributed
= XEXP (distributed
, 0);
9957 /* Distribute the second term. */
9958 new_op0
= simplify_gen_binary (outer_code
, mode
, inner_op0
, distributed
);
9959 new_op1
= simplify_gen_binary (outer_code
, mode
, inner_op1
, distributed
);
9963 /* Distribute the first term. */
9964 new_op0
= simplify_gen_binary (outer_code
, mode
, distributed
, inner_op0
);
9965 new_op1
= simplify_gen_binary (outer_code
, mode
, distributed
, inner_op1
);
9968 tmp
= apply_distributive_law (simplify_gen_binary (inner_code
, mode
,
9970 if (GET_CODE (tmp
) != outer_code
9971 && (set_src_cost (tmp
, mode
, optimize_this_for_speed_p
)
9972 < set_src_cost (x
, mode
, optimize_this_for_speed_p
)))
9978 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9979 in MODE. Return an equivalent form, if different from (and VAROP
9980 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9983 simplify_and_const_int_1 (scalar_int_mode mode
, rtx varop
,
9984 unsigned HOST_WIDE_INT constop
)
9986 unsigned HOST_WIDE_INT nonzero
;
9987 unsigned HOST_WIDE_INT orig_constop
;
9992 orig_constop
= constop
;
9993 if (GET_CODE (varop
) == CLOBBER
)
9996 /* Simplify VAROP knowing that we will be only looking at some of the
9999 Note by passing in CONSTOP, we guarantee that the bits not set in
10000 CONSTOP are not significant and will never be examined. We must
10001 ensure that is the case by explicitly masking out those bits
10002 before returning. */
10003 varop
= force_to_mode (varop
, mode
, constop
, 0);
10005 /* If VAROP is a CLOBBER, we will fail so return it. */
10006 if (GET_CODE (varop
) == CLOBBER
)
10009 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10010 to VAROP and return the new constant. */
10011 if (CONST_INT_P (varop
))
10012 return gen_int_mode (INTVAL (varop
) & constop
, mode
);
10014 /* See what bits may be nonzero in VAROP. Unlike the general case of
10015 a call to nonzero_bits, here we don't care about bits outside
10018 nonzero
= nonzero_bits (varop
, mode
) & GET_MODE_MASK (mode
);
10020 /* Turn off all bits in the constant that are known to already be zero.
10021 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10022 which is tested below. */
10024 constop
&= nonzero
;
10026 /* If we don't have any bits left, return zero. */
10027 if (constop
== 0 && !side_effects_p (varop
))
10030 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10031 a power of two, we can replace this with an ASHIFT. */
10032 if (GET_CODE (varop
) == NEG
&& nonzero_bits (XEXP (varop
, 0), mode
) == 1
10033 && (i
= exact_log2 (constop
)) >= 0)
10034 return simplify_shift_const (NULL_RTX
, ASHIFT
, mode
, XEXP (varop
, 0), i
);
10036 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10037 or XOR, then try to apply the distributive law. This may eliminate
10038 operations if either branch can be simplified because of the AND.
10039 It may also make some cases more complex, but those cases probably
10040 won't match a pattern either with or without this. */
10042 if (GET_CODE (varop
) == IOR
|| GET_CODE (varop
) == XOR
)
10044 scalar_int_mode varop_mode
= as_a
<scalar_int_mode
> (GET_MODE (varop
));
10048 apply_distributive_law
10049 (simplify_gen_binary (GET_CODE (varop
), varop_mode
,
10050 simplify_and_const_int (NULL_RTX
, varop_mode
,
10053 simplify_and_const_int (NULL_RTX
, varop_mode
,
10058 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10059 the AND and see if one of the operands simplifies to zero. If so, we
10060 may eliminate it. */
10062 if (GET_CODE (varop
) == PLUS
10063 && pow2p_hwi (constop
+ 1))
10067 o0
= simplify_and_const_int (NULL_RTX
, mode
, XEXP (varop
, 0), constop
);
10068 o1
= simplify_and_const_int (NULL_RTX
, mode
, XEXP (varop
, 1), constop
);
10069 if (o0
== const0_rtx
)
10071 if (o1
== const0_rtx
)
10075 /* Make a SUBREG if necessary. If we can't make it, fail. */
10076 varop
= gen_lowpart (mode
, varop
);
10077 if (varop
== NULL_RTX
|| GET_CODE (varop
) == CLOBBER
)
10080 /* If we are only masking insignificant bits, return VAROP. */
10081 if (constop
== nonzero
)
10084 if (varop
== orig_varop
&& constop
== orig_constop
)
10087 /* Otherwise, return an AND. */
10088 return simplify_gen_binary (AND
, mode
, varop
, gen_int_mode (constop
, mode
));
10092 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10095 Return an equivalent form, if different from X. Otherwise, return X. If
10096 X is zero, we are to always construct the equivalent form. */
10099 simplify_and_const_int (rtx x
, scalar_int_mode mode
, rtx varop
,
10100 unsigned HOST_WIDE_INT constop
)
10102 rtx tem
= simplify_and_const_int_1 (mode
, varop
, constop
);
10107 x
= simplify_gen_binary (AND
, GET_MODE (varop
), varop
,
10108 gen_int_mode (constop
, mode
));
10109 if (GET_MODE (x
) != mode
)
10110 x
= gen_lowpart (mode
, x
);
10114 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10115 We don't care about bits outside of those defined in MODE.
10116 We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10118 For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
10119 a shift, AND, or zero_extract, we can do better. */
10122 reg_nonzero_bits_for_combine (const_rtx x
, scalar_int_mode xmode
,
10123 scalar_int_mode mode
,
10124 unsigned HOST_WIDE_INT
*nonzero
)
10127 reg_stat_type
*rsp
;
10129 /* If X is a register whose nonzero bits value is current, use it.
10130 Otherwise, if X is a register whose value we can find, use that
10131 value. Otherwise, use the previously-computed global nonzero bits
10132 for this register. */
10134 rsp
= ®_stat
[REGNO (x
)];
10135 if (rsp
->last_set_value
!= 0
10136 && (rsp
->last_set_mode
== mode
10137 || (REGNO (x
) >= FIRST_PSEUDO_REGISTER
10138 && GET_MODE_CLASS (rsp
->last_set_mode
) == MODE_INT
10139 && GET_MODE_CLASS (mode
) == MODE_INT
))
10140 && ((rsp
->last_set_label
>= label_tick_ebb_start
10141 && rsp
->last_set_label
< label_tick
)
10142 || (rsp
->last_set_label
== label_tick
10143 && DF_INSN_LUID (rsp
->last_set
) < subst_low_luid
)
10144 || (REGNO (x
) >= FIRST_PSEUDO_REGISTER
10145 && REGNO (x
) < reg_n_sets_max
10146 && REG_N_SETS (REGNO (x
)) == 1
10147 && !REGNO_REG_SET_P
10148 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
),
10151 /* Note that, even if the precision of last_set_mode is lower than that
10152 of mode, record_value_for_reg invoked nonzero_bits on the register
10153 with nonzero_bits_mode (because last_set_mode is necessarily integral
10154 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10155 are all valid, hence in mode too since nonzero_bits_mode is defined
10156 to the largest HWI_COMPUTABLE_MODE_P mode. */
10157 *nonzero
&= rsp
->last_set_nonzero_bits
;
10161 tem
= get_last_value (x
);
10164 if (SHORT_IMMEDIATES_SIGN_EXTEND
)
10165 tem
= sign_extend_short_imm (tem
, xmode
, GET_MODE_PRECISION (mode
));
10170 if (nonzero_sign_valid
&& rsp
->nonzero_bits
)
10172 unsigned HOST_WIDE_INT mask
= rsp
->nonzero_bits
;
10174 if (GET_MODE_PRECISION (xmode
) < GET_MODE_PRECISION (mode
))
10175 /* We don't know anything about the upper bits. */
10176 mask
|= GET_MODE_MASK (mode
) ^ GET_MODE_MASK (xmode
);
10184 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10185 end of X that are known to be equal to the sign bit. X will be used
10186 in mode MODE; the returned value will always be between 1 and the
10187 number of bits in MODE. */
10190 reg_num_sign_bit_copies_for_combine (const_rtx x
, scalar_int_mode xmode
,
10191 scalar_int_mode mode
,
10192 unsigned int *result
)
10195 reg_stat_type
*rsp
;
10197 rsp
= ®_stat
[REGNO (x
)];
10198 if (rsp
->last_set_value
!= 0
10199 && rsp
->last_set_mode
== mode
10200 && ((rsp
->last_set_label
>= label_tick_ebb_start
10201 && rsp
->last_set_label
< label_tick
)
10202 || (rsp
->last_set_label
== label_tick
10203 && DF_INSN_LUID (rsp
->last_set
) < subst_low_luid
)
10204 || (REGNO (x
) >= FIRST_PSEUDO_REGISTER
10205 && REGNO (x
) < reg_n_sets_max
10206 && REG_N_SETS (REGNO (x
)) == 1
10207 && !REGNO_REG_SET_P
10208 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
),
10211 *result
= rsp
->last_set_sign_bit_copies
;
10215 tem
= get_last_value (x
);
10219 if (nonzero_sign_valid
&& rsp
->sign_bit_copies
!= 0
10220 && GET_MODE_PRECISION (xmode
) == GET_MODE_PRECISION (mode
))
10221 *result
= rsp
->sign_bit_copies
;
10226 /* Return the number of "extended" bits there are in X, when interpreted
10227 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10228 unsigned quantities, this is the number of high-order zero bits.
10229 For signed quantities, this is the number of copies of the sign bit
10230 minus 1. In both case, this function returns the number of "spare"
10231 bits. For example, if two quantities for which this function returns
10232 at least 1 are added, the addition is known not to overflow.
10234 This function will always return 0 unless called during combine, which
10235 implies that it must be called from a define_split. */
10238 extended_count (const_rtx x
, machine_mode mode
, int unsignedp
)
10240 if (nonzero_sign_valid
== 0)
10243 scalar_int_mode int_mode
;
10245 ? (is_a
<scalar_int_mode
> (mode
, &int_mode
)
10246 && HWI_COMPUTABLE_MODE_P (int_mode
)
10247 ? (unsigned int) (GET_MODE_PRECISION (int_mode
) - 1
10248 - floor_log2 (nonzero_bits (x
, int_mode
)))
10250 : num_sign_bit_copies (x
, mode
) - 1);
10253 /* This function is called from `simplify_shift_const' to merge two
10254 outer operations. Specifically, we have already found that we need
10255 to perform operation *POP0 with constant *PCONST0 at the outermost
10256 position. We would now like to also perform OP1 with constant CONST1
10257 (with *POP0 being done last).
10259 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10260 the resulting operation. *PCOMP_P is set to 1 if we would need to
10261 complement the innermost operand, otherwise it is unchanged.
10263 MODE is the mode in which the operation will be done. No bits outside
10264 the width of this mode matter. It is assumed that the width of this mode
10265 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10267 If *POP0 or OP1 are UNKNOWN, it means no operation is required. Only NEG, PLUS,
10268 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10269 result is simply *PCONST0.
10271 If the resulting operation cannot be expressed as one operation, we
10272 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10275 merge_outer_ops (enum rtx_code
*pop0
, HOST_WIDE_INT
*pconst0
, enum rtx_code op1
, HOST_WIDE_INT const1
, machine_mode mode
, int *pcomp_p
)
10277 enum rtx_code op0
= *pop0
;
10278 HOST_WIDE_INT const0
= *pconst0
;
10280 const0
&= GET_MODE_MASK (mode
);
10281 const1
&= GET_MODE_MASK (mode
);
10283 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10287 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10290 if (op1
== UNKNOWN
|| op0
== SET
)
10293 else if (op0
== UNKNOWN
)
10294 op0
= op1
, const0
= const1
;
10296 else if (op0
== op1
)
10320 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10321 else if (op0
== PLUS
|| op1
== PLUS
|| op0
== NEG
|| op1
== NEG
)
10324 /* If the two constants aren't the same, we can't do anything. The
10325 remaining six cases can all be done. */
10326 else if (const0
!= const1
)
10334 /* (a & b) | b == b */
10336 else /* op1 == XOR */
10337 /* (a ^ b) | b == a | b */
10343 /* (a & b) ^ b == (~a) & b */
10344 op0
= AND
, *pcomp_p
= 1;
10345 else /* op1 == IOR */
10346 /* (a | b) ^ b == a & ~b */
10347 op0
= AND
, const0
= ~const0
;
10352 /* (a | b) & b == b */
10354 else /* op1 == XOR */
10355 /* (a ^ b) & b) == (~a) & b */
10362 /* Check for NO-OP cases. */
10363 const0
&= GET_MODE_MASK (mode
);
10365 && (op0
== IOR
|| op0
== XOR
|| op0
== PLUS
))
10367 else if (const0
== 0 && op0
== AND
)
10369 else if ((unsigned HOST_WIDE_INT
) const0
== GET_MODE_MASK (mode
)
10375 /* ??? Slightly redundant with the above mask, but not entirely.
10376 Moving this above means we'd have to sign-extend the mode mask
10377 for the final test. */
10378 if (op0
!= UNKNOWN
&& op0
!= NEG
)
10379 *pconst0
= trunc_int_for_mode (const0
, mode
);
10384 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10385 the shift in. The original shift operation CODE is performed on OP in
10386 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10387 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10388 result of the shift is subject to operation OUTER_CODE with operand
10391 static scalar_int_mode
10392 try_widen_shift_mode (enum rtx_code code
, rtx op
, int count
,
10393 scalar_int_mode orig_mode
, scalar_int_mode mode
,
10394 enum rtx_code outer_code
, HOST_WIDE_INT outer_const
)
10396 gcc_assert (GET_MODE_PRECISION (mode
) > GET_MODE_PRECISION (orig_mode
));
10398 /* In general we can't perform in wider mode for right shift and rotate. */
10402 /* We can still widen if the bits brought in from the left are identical
10403 to the sign bit of ORIG_MODE. */
10404 if (num_sign_bit_copies (op
, mode
)
10405 > (unsigned) (GET_MODE_PRECISION (mode
)
10406 - GET_MODE_PRECISION (orig_mode
)))
10411 /* Similarly here but with zero bits. */
10412 if (HWI_COMPUTABLE_MODE_P (mode
)
10413 && (nonzero_bits (op
, mode
) & ~GET_MODE_MASK (orig_mode
)) == 0)
10416 /* We can also widen if the bits brought in will be masked off. This
10417 operation is performed in ORIG_MODE. */
10418 if (outer_code
== AND
)
10420 int care_bits
= low_bitmask_len (orig_mode
, outer_const
);
10423 && GET_MODE_PRECISION (orig_mode
) - care_bits
>= count
)
10432 gcc_unreachable ();
10439 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10440 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10441 if we cannot simplify it. Otherwise, return a simplified value.
10443 The shift is normally computed in the widest mode we find in VAROP, as
10444 long as it isn't a different number of words than RESULT_MODE. Exceptions
10445 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10448 simplify_shift_const_1 (enum rtx_code code
, machine_mode result_mode
,
10449 rtx varop
, int orig_count
)
10451 enum rtx_code orig_code
= code
;
10452 rtx orig_varop
= varop
;
10454 machine_mode mode
= result_mode
;
10455 machine_mode shift_mode
;
10456 scalar_int_mode tmode
, inner_mode
, int_mode
, int_varop_mode
, int_result_mode
;
10457 /* We form (outer_op (code varop count) (outer_const)). */
10458 enum rtx_code outer_op
= UNKNOWN
;
10459 HOST_WIDE_INT outer_const
= 0;
10460 int complement_p
= 0;
10463 /* Make sure and truncate the "natural" shift on the way in. We don't
10464 want to do this inside the loop as it makes it more difficult to
10466 if (SHIFT_COUNT_TRUNCATED
)
10467 orig_count
&= GET_MODE_UNIT_BITSIZE (mode
) - 1;
10469 /* If we were given an invalid count, don't do anything except exactly
10470 what was requested. */
10472 if (orig_count
< 0 || orig_count
>= (int) GET_MODE_UNIT_PRECISION (mode
))
10475 count
= orig_count
;
10477 /* Unless one of the branches of the `if' in this loop does a `continue',
10478 we will `break' the loop after the `if'. */
10482 /* If we have an operand of (clobber (const_int 0)), fail. */
10483 if (GET_CODE (varop
) == CLOBBER
)
10486 /* Convert ROTATERT to ROTATE. */
10487 if (code
== ROTATERT
)
10489 unsigned int bitsize
= GET_MODE_UNIT_PRECISION (result_mode
);
10491 count
= bitsize
- count
;
10494 shift_mode
= result_mode
;
10495 if (shift_mode
!= mode
)
10497 /* We only change the modes of scalar shifts. */
10498 int_mode
= as_a
<scalar_int_mode
> (mode
);
10499 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
10500 shift_mode
= try_widen_shift_mode (code
, varop
, count
,
10501 int_result_mode
, int_mode
,
10502 outer_op
, outer_const
);
10505 scalar_int_mode shift_unit_mode
10506 = as_a
<scalar_int_mode
> (GET_MODE_INNER (shift_mode
));
10508 /* Handle cases where the count is greater than the size of the mode
10509 minus 1. For ASHIFT, use the size minus one as the count (this can
10510 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10511 take the count modulo the size. For other shifts, the result is
10514 Since these shifts are being produced by the compiler by combining
10515 multiple operations, each of which are defined, we know what the
10516 result is supposed to be. */
10518 if (count
> (GET_MODE_PRECISION (shift_unit_mode
) - 1))
10520 if (code
== ASHIFTRT
)
10521 count
= GET_MODE_PRECISION (shift_unit_mode
) - 1;
10522 else if (code
== ROTATE
|| code
== ROTATERT
)
10523 count
%= GET_MODE_PRECISION (shift_unit_mode
);
10526 /* We can't simply return zero because there may be an
10528 varop
= const0_rtx
;
10534 /* If we discovered we had to complement VAROP, leave. Making a NOT
10535 here would cause an infinite loop. */
10539 if (shift_mode
== shift_unit_mode
)
10541 /* An arithmetic right shift of a quantity known to be -1 or 0
10543 if (code
== ASHIFTRT
10544 && (num_sign_bit_copies (varop
, shift_unit_mode
)
10545 == GET_MODE_PRECISION (shift_unit_mode
)))
10551 /* If we are doing an arithmetic right shift and discarding all but
10552 the sign bit copies, this is equivalent to doing a shift by the
10553 bitsize minus one. Convert it into that shift because it will
10554 often allow other simplifications. */
10556 if (code
== ASHIFTRT
10557 && (count
+ num_sign_bit_copies (varop
, shift_unit_mode
)
10558 >= GET_MODE_PRECISION (shift_unit_mode
)))
10559 count
= GET_MODE_PRECISION (shift_unit_mode
) - 1;
10561 /* We simplify the tests below and elsewhere by converting
10562 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10563 `make_compound_operation' will convert it to an ASHIFTRT for
10564 those machines (such as VAX) that don't have an LSHIFTRT. */
10565 if (code
== ASHIFTRT
10566 && HWI_COMPUTABLE_MODE_P (shift_unit_mode
)
10567 && val_signbit_known_clear_p (shift_unit_mode
,
10568 nonzero_bits (varop
,
10572 if (((code
== LSHIFTRT
10573 && HWI_COMPUTABLE_MODE_P (shift_unit_mode
)
10574 && !(nonzero_bits (varop
, shift_unit_mode
) >> count
))
10576 && HWI_COMPUTABLE_MODE_P (shift_unit_mode
)
10577 && !((nonzero_bits (varop
, shift_unit_mode
) << count
)
10578 & GET_MODE_MASK (shift_unit_mode
))))
10579 && !side_effects_p (varop
))
10580 varop
= const0_rtx
;
10583 switch (GET_CODE (varop
))
10589 new_rtx
= expand_compound_operation (varop
);
10590 if (new_rtx
!= varop
)
10598 /* The following rules apply only to scalars. */
10599 if (shift_mode
!= shift_unit_mode
)
10601 int_mode
= as_a
<scalar_int_mode
> (mode
);
10603 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10604 minus the width of a smaller mode, we can do this with a
10605 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10606 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
10607 && ! mode_dependent_address_p (XEXP (varop
, 0),
10608 MEM_ADDR_SPACE (varop
))
10609 && ! MEM_VOLATILE_P (varop
)
10610 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode
) - count
, 1)
10613 new_rtx
= adjust_address_nv (varop
, tmode
,
10614 BYTES_BIG_ENDIAN
? 0
10615 : count
/ BITS_PER_UNIT
);
10617 varop
= gen_rtx_fmt_e (code
== ASHIFTRT
? SIGN_EXTEND
10618 : ZERO_EXTEND
, int_mode
, new_rtx
);
10625 /* The following rules apply only to scalars. */
10626 if (shift_mode
!= shift_unit_mode
)
10628 int_mode
= as_a
<scalar_int_mode
> (mode
);
10629 int_varop_mode
= as_a
<scalar_int_mode
> (GET_MODE (varop
));
10631 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10632 the same number of words as what we've seen so far. Then store
10633 the widest mode in MODE. */
10634 if (subreg_lowpart_p (varop
)
10635 && is_int_mode (GET_MODE (SUBREG_REG (varop
)), &inner_mode
)
10636 && GET_MODE_SIZE (inner_mode
) > GET_MODE_SIZE (int_varop_mode
)
10637 && (CEIL (GET_MODE_SIZE (inner_mode
), UNITS_PER_WORD
)
10638 == CEIL (GET_MODE_SIZE (int_mode
), UNITS_PER_WORD
))
10639 && GET_MODE_CLASS (int_varop_mode
) == MODE_INT
)
10641 varop
= SUBREG_REG (varop
);
10642 if (GET_MODE_SIZE (inner_mode
) > GET_MODE_SIZE (int_mode
))
10649 /* Some machines use MULT instead of ASHIFT because MULT
10650 is cheaper. But it is still better on those machines to
10651 merge two shifts into one. */
10652 if (CONST_INT_P (XEXP (varop
, 1))
10653 && (log2
= exact_log2 (UINTVAL (XEXP (varop
, 1)))) >= 0)
10655 rtx log2_rtx
= gen_int_shift_amount (GET_MODE (varop
), log2
);
10656 varop
= simplify_gen_binary (ASHIFT
, GET_MODE (varop
),
10657 XEXP (varop
, 0), log2_rtx
);
10663 /* Similar, for when divides are cheaper. */
10664 if (CONST_INT_P (XEXP (varop
, 1))
10665 && (log2
= exact_log2 (UINTVAL (XEXP (varop
, 1)))) >= 0)
10667 rtx log2_rtx
= gen_int_shift_amount (GET_MODE (varop
), log2
);
10668 varop
= simplify_gen_binary (LSHIFTRT
, GET_MODE (varop
),
10669 XEXP (varop
, 0), log2_rtx
);
10675 /* If we are extracting just the sign bit of an arithmetic
10676 right shift, that shift is not needed. However, the sign
10677 bit of a wider mode may be different from what would be
10678 interpreted as the sign bit in a narrower mode, so, if
10679 the result is narrower, don't discard the shift. */
10680 if (code
== LSHIFTRT
10681 && count
== (GET_MODE_UNIT_BITSIZE (result_mode
) - 1)
10682 && (GET_MODE_UNIT_BITSIZE (result_mode
)
10683 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop
))))
10685 varop
= XEXP (varop
, 0);
10694 /* The following rules apply only to scalars. */
10695 if (shift_mode
!= shift_unit_mode
)
10697 int_mode
= as_a
<scalar_int_mode
> (mode
);
10698 int_varop_mode
= as_a
<scalar_int_mode
> (GET_MODE (varop
));
10699 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
10701 /* Here we have two nested shifts. The result is usually the
10702 AND of a new shift with a mask. We compute the result below. */
10703 if (CONST_INT_P (XEXP (varop
, 1))
10704 && INTVAL (XEXP (varop
, 1)) >= 0
10705 && INTVAL (XEXP (varop
, 1)) < GET_MODE_PRECISION (int_varop_mode
)
10706 && HWI_COMPUTABLE_MODE_P (int_result_mode
)
10707 && HWI_COMPUTABLE_MODE_P (int_mode
))
10709 enum rtx_code first_code
= GET_CODE (varop
);
10710 unsigned int first_count
= INTVAL (XEXP (varop
, 1));
10711 unsigned HOST_WIDE_INT mask
;
10714 /* We have one common special case. We can't do any merging if
10715 the inner code is an ASHIFTRT of a smaller mode. However, if
10716 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10717 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10718 we can convert it to
10719 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10720 This simplifies certain SIGN_EXTEND operations. */
10721 if (code
== ASHIFT
&& first_code
== ASHIFTRT
10722 && count
== (GET_MODE_PRECISION (int_result_mode
)
10723 - GET_MODE_PRECISION (int_varop_mode
)))
10725 /* C3 has the low-order C1 bits zero. */
10727 mask
= GET_MODE_MASK (int_mode
)
10728 & ~((HOST_WIDE_INT_1U
<< first_count
) - 1);
10730 varop
= simplify_and_const_int (NULL_RTX
, int_result_mode
,
10731 XEXP (varop
, 0), mask
);
10732 varop
= simplify_shift_const (NULL_RTX
, ASHIFT
,
10733 int_result_mode
, varop
, count
);
10734 count
= first_count
;
10739 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10740 than C1 high-order bits equal to the sign bit, we can convert
10741 this to either an ASHIFT or an ASHIFTRT depending on the
10744 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10746 if (code
== ASHIFTRT
&& first_code
== ASHIFT
10747 && int_varop_mode
== shift_unit_mode
10748 && (num_sign_bit_copies (XEXP (varop
, 0), shift_unit_mode
)
10751 varop
= XEXP (varop
, 0);
10752 count
-= first_count
;
10762 /* There are some cases we can't do. If CODE is ASHIFTRT,
10763 we can only do this if FIRST_CODE is also ASHIFTRT.
10765 We can't do the case when CODE is ROTATE and FIRST_CODE is
10768 If the mode of this shift is not the mode of the outer shift,
10769 we can't do this if either shift is a right shift or ROTATE.
10771 Finally, we can't do any of these if the mode is too wide
10772 unless the codes are the same.
10774 Handle the case where the shift codes are the same
10777 if (code
== first_code
)
10779 if (int_varop_mode
!= int_result_mode
10780 && (code
== ASHIFTRT
|| code
== LSHIFTRT
10781 || code
== ROTATE
))
10784 count
+= first_count
;
10785 varop
= XEXP (varop
, 0);
10789 if (code
== ASHIFTRT
10790 || (code
== ROTATE
&& first_code
== ASHIFTRT
)
10791 || GET_MODE_PRECISION (int_mode
) > HOST_BITS_PER_WIDE_INT
10792 || (int_varop_mode
!= int_result_mode
10793 && (first_code
== ASHIFTRT
|| first_code
== LSHIFTRT
10794 || first_code
== ROTATE
10795 || code
== ROTATE
)))
10798 /* To compute the mask to apply after the shift, shift the
10799 nonzero bits of the inner shift the same way the
10800 outer shift will. */
10802 mask_rtx
= gen_int_mode (nonzero_bits (varop
, int_varop_mode
),
10804 rtx count_rtx
= gen_int_shift_amount (int_result_mode
, count
);
10806 = simplify_const_binary_operation (code
, int_result_mode
,
10807 mask_rtx
, count_rtx
);
10809 /* Give up if we can't compute an outer operation to use. */
10811 || !CONST_INT_P (mask_rtx
)
10812 || ! merge_outer_ops (&outer_op
, &outer_const
, AND
,
10814 int_result_mode
, &complement_p
))
10817 /* If the shifts are in the same direction, we add the
10818 counts. Otherwise, we subtract them. */
10819 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
10820 == (first_code
== ASHIFTRT
|| first_code
== LSHIFTRT
))
10821 count
+= first_count
;
10823 count
-= first_count
;
10825 /* If COUNT is positive, the new shift is usually CODE,
10826 except for the two exceptions below, in which case it is
10827 FIRST_CODE. If the count is negative, FIRST_CODE should
10830 && ((first_code
== ROTATE
&& code
== ASHIFT
)
10831 || (first_code
== ASHIFTRT
&& code
== LSHIFTRT
)))
10833 else if (count
< 0)
10834 code
= first_code
, count
= -count
;
10836 varop
= XEXP (varop
, 0);
10840 /* If we have (A << B << C) for any shift, we can convert this to
10841 (A << C << B). This wins if A is a constant. Only try this if
10842 B is not a constant. */
10844 else if (GET_CODE (varop
) == code
10845 && CONST_INT_P (XEXP (varop
, 0))
10846 && !CONST_INT_P (XEXP (varop
, 1)))
10848 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10849 sure the result will be masked. See PR70222. */
10850 if (code
== LSHIFTRT
10851 && int_mode
!= int_result_mode
10852 && !merge_outer_ops (&outer_op
, &outer_const
, AND
,
10853 GET_MODE_MASK (int_result_mode
)
10854 >> orig_count
, int_result_mode
,
10857 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10858 up outer sign extension (often left and right shift) is
10859 hardly more efficient than the original. See PR70429.
10860 Similarly punt for rotates with different modes.
10862 if ((code
== ASHIFTRT
|| code
== ROTATE
)
10863 && int_mode
!= int_result_mode
)
10866 rtx count_rtx
= gen_int_shift_amount (int_result_mode
, count
);
10867 rtx new_rtx
= simplify_const_binary_operation (code
, int_mode
,
10870 varop
= gen_rtx_fmt_ee (code
, int_mode
, new_rtx
, XEXP (varop
, 1));
10877 /* The following rules apply only to scalars. */
10878 if (shift_mode
!= shift_unit_mode
)
10881 /* Make this fit the case below. */
10882 varop
= gen_rtx_XOR (mode
, XEXP (varop
, 0), constm1_rtx
);
10888 /* The following rules apply only to scalars. */
10889 if (shift_mode
!= shift_unit_mode
)
10891 int_varop_mode
= as_a
<scalar_int_mode
> (GET_MODE (varop
));
10892 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
10894 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10895 with C the size of VAROP - 1 and the shift is logical if
10896 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10897 we have an (le X 0) operation. If we have an arithmetic shift
10898 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10899 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10901 if (GET_CODE (varop
) == IOR
&& GET_CODE (XEXP (varop
, 0)) == PLUS
10902 && XEXP (XEXP (varop
, 0), 1) == constm1_rtx
10903 && (STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
10904 && (code
== LSHIFTRT
|| code
== ASHIFTRT
)
10905 && count
== (GET_MODE_PRECISION (int_varop_mode
) - 1)
10906 && rtx_equal_p (XEXP (XEXP (varop
, 0), 0), XEXP (varop
, 1)))
10909 varop
= gen_rtx_LE (int_varop_mode
, XEXP (varop
, 1),
10912 if (STORE_FLAG_VALUE
== 1 ? code
== ASHIFTRT
: code
== LSHIFTRT
)
10913 varop
= gen_rtx_NEG (int_varop_mode
, varop
);
10918 /* If we have (shift (logical)), move the logical to the outside
10919 to allow it to possibly combine with another logical and the
10920 shift to combine with another shift. This also canonicalizes to
10921 what a ZERO_EXTRACT looks like. Also, some machines have
10922 (and (shift)) insns. */
10924 if (CONST_INT_P (XEXP (varop
, 1))
10925 /* We can't do this if we have (ashiftrt (xor)) and the
10926 constant has its sign bit set in shift_unit_mode with
10927 shift_unit_mode wider than result_mode. */
10928 && !(code
== ASHIFTRT
&& GET_CODE (varop
) == XOR
10929 && int_result_mode
!= shift_unit_mode
10930 && trunc_int_for_mode (INTVAL (XEXP (varop
, 1)),
10931 shift_unit_mode
) < 0)
10932 && (new_rtx
= simplify_const_binary_operation
10933 (code
, int_result_mode
,
10934 gen_int_mode (INTVAL (XEXP (varop
, 1)), int_result_mode
),
10935 gen_int_shift_amount (int_result_mode
, count
))) != 0
10936 && CONST_INT_P (new_rtx
)
10937 && merge_outer_ops (&outer_op
, &outer_const
, GET_CODE (varop
),
10938 INTVAL (new_rtx
), int_result_mode
,
10941 varop
= XEXP (varop
, 0);
10945 /* If we can't do that, try to simplify the shift in each arm of the
10946 logical expression, make a new logical expression, and apply
10947 the inverse distributive law. This also can't be done for
10948 (ashiftrt (xor)) where we've widened the shift and the constant
10949 changes the sign bit. */
10950 if (CONST_INT_P (XEXP (varop
, 1))
10951 && !(code
== ASHIFTRT
&& GET_CODE (varop
) == XOR
10952 && int_result_mode
!= shift_unit_mode
10953 && trunc_int_for_mode (INTVAL (XEXP (varop
, 1)),
10954 shift_unit_mode
) < 0))
10956 rtx lhs
= simplify_shift_const (NULL_RTX
, code
, shift_unit_mode
,
10957 XEXP (varop
, 0), count
);
10958 rtx rhs
= simplify_shift_const (NULL_RTX
, code
, shift_unit_mode
,
10959 XEXP (varop
, 1), count
);
10961 varop
= simplify_gen_binary (GET_CODE (varop
), shift_unit_mode
,
10963 varop
= apply_distributive_law (varop
);
10971 /* The following rules apply only to scalars. */
10972 if (shift_mode
!= shift_unit_mode
)
10974 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
10976 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10977 says that the sign bit can be tested, FOO has mode MODE, C is
10978 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10979 that may be nonzero. */
10980 if (code
== LSHIFTRT
10981 && XEXP (varop
, 1) == const0_rtx
10982 && GET_MODE (XEXP (varop
, 0)) == int_result_mode
10983 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
10984 && HWI_COMPUTABLE_MODE_P (int_result_mode
)
10985 && STORE_FLAG_VALUE
== -1
10986 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1
10987 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1,
10988 int_result_mode
, &complement_p
))
10990 varop
= XEXP (varop
, 0);
10997 /* The following rules apply only to scalars. */
10998 if (shift_mode
!= shift_unit_mode
)
11000 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
11002 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11003 than the number of bits in the mode is equivalent to A. */
11004 if (code
== LSHIFTRT
11005 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
11006 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1)
11008 varop
= XEXP (varop
, 0);
11013 /* NEG commutes with ASHIFT since it is multiplication. Move the
11014 NEG outside to allow shifts to combine. */
11016 && merge_outer_ops (&outer_op
, &outer_const
, NEG
, 0,
11017 int_result_mode
, &complement_p
))
11019 varop
= XEXP (varop
, 0);
11025 /* The following rules apply only to scalars. */
11026 if (shift_mode
!= shift_unit_mode
)
11028 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
11030 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11031 is one less than the number of bits in the mode is
11032 equivalent to (xor A 1). */
11033 if (code
== LSHIFTRT
11034 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
11035 && XEXP (varop
, 1) == constm1_rtx
11036 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1
11037 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1,
11038 int_result_mode
, &complement_p
))
11041 varop
= XEXP (varop
, 0);
11045 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11046 that might be nonzero in BAR are those being shifted out and those
11047 bits are known zero in FOO, we can replace the PLUS with FOO.
11048 Similarly in the other operand order. This code occurs when
11049 we are computing the size of a variable-size array. */
11051 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
11052 && count
< HOST_BITS_PER_WIDE_INT
11053 && nonzero_bits (XEXP (varop
, 1), int_result_mode
) >> count
== 0
11054 && (nonzero_bits (XEXP (varop
, 1), int_result_mode
)
11055 & nonzero_bits (XEXP (varop
, 0), int_result_mode
)) == 0)
11057 varop
= XEXP (varop
, 0);
11060 else if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
11061 && count
< HOST_BITS_PER_WIDE_INT
11062 && HWI_COMPUTABLE_MODE_P (int_result_mode
)
11063 && (nonzero_bits (XEXP (varop
, 0), int_result_mode
)
11065 && (nonzero_bits (XEXP (varop
, 0), int_result_mode
)
11066 & nonzero_bits (XEXP (varop
, 1), int_result_mode
)) == 0)
11068 varop
= XEXP (varop
, 1);
11072 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11074 && CONST_INT_P (XEXP (varop
, 1))
11075 && (new_rtx
= simplify_const_binary_operation
11076 (ASHIFT
, int_result_mode
,
11077 gen_int_mode (INTVAL (XEXP (varop
, 1)), int_result_mode
),
11078 gen_int_shift_amount (int_result_mode
, count
))) != 0
11079 && CONST_INT_P (new_rtx
)
11080 && merge_outer_ops (&outer_op
, &outer_const
, PLUS
,
11081 INTVAL (new_rtx
), int_result_mode
,
11084 varop
= XEXP (varop
, 0);
11088 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11089 signbit', and attempt to change the PLUS to an XOR and move it to
11090 the outer operation as is done above in the AND/IOR/XOR case
11091 leg for shift(logical). See details in logical handling above
11092 for reasoning in doing so. */
11093 if (code
== LSHIFTRT
11094 && CONST_INT_P (XEXP (varop
, 1))
11095 && mode_signbit_p (int_result_mode
, XEXP (varop
, 1))
11096 && (new_rtx
= simplify_const_binary_operation
11097 (code
, int_result_mode
,
11098 gen_int_mode (INTVAL (XEXP (varop
, 1)), int_result_mode
),
11099 gen_int_shift_amount (int_result_mode
, count
))) != 0
11100 && CONST_INT_P (new_rtx
)
11101 && merge_outer_ops (&outer_op
, &outer_const
, XOR
,
11102 INTVAL (new_rtx
), int_result_mode
,
11105 varop
= XEXP (varop
, 0);
11112 /* The following rules apply only to scalars. */
11113 if (shift_mode
!= shift_unit_mode
)
11115 int_varop_mode
= as_a
<scalar_int_mode
> (GET_MODE (varop
));
11117 /* If we have (xshiftrt (minus (ashiftrt X C)) X) C)
11118 with C the size of VAROP - 1 and the shift is logical if
11119 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11120 we have a (gt X 0) operation. If the shift is arithmetic with
11121 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11122 we have a (neg (gt X 0)) operation. */
11124 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
11125 && GET_CODE (XEXP (varop
, 0)) == ASHIFTRT
11126 && count
== (GET_MODE_PRECISION (int_varop_mode
) - 1)
11127 && (code
== LSHIFTRT
|| code
== ASHIFTRT
)
11128 && CONST_INT_P (XEXP (XEXP (varop
, 0), 1))
11129 && INTVAL (XEXP (XEXP (varop
, 0), 1)) == count
11130 && rtx_equal_p (XEXP (XEXP (varop
, 0), 0), XEXP (varop
, 1)))
11133 varop
= gen_rtx_GT (int_varop_mode
, XEXP (varop
, 1),
11136 if (STORE_FLAG_VALUE
== 1 ? code
== ASHIFTRT
: code
== LSHIFTRT
)
11137 varop
= gen_rtx_NEG (int_varop_mode
, varop
);
	  break;

	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);
	      int new_count = count + INTVAL (XEXP (varop_inner, 1));
	      rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
							new_count);
	      varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
					      XEXP (varop_inner, 0),
					      new_count_rtx);
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
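	  /* E.g. an outer shift by 2 around (truncate:SI (lshiftrt:DI X 32))
	     becomes (truncate:SI (lshiftrt:DI X 34)): the two logical shift
	     counts are simply added.  */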
	  break;

	default:
	  break;
	}

      break;
    }

  shift_mode = result_mode;
  if (shift_mode != mode)
    {
      /* We only change the modes of scalar shifts.  */
      int_mode = as_a <scalar_int_mode> (mode);
      int_result_mode = as_a <scalar_int_mode> (result_mode);
      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
					 int_mode, outer_op, outer_const);
    }

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop,
			     gen_int_shift_amount (shift_mode, count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    /* We only change the modes of scalar shifts.  */
    x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
				x, GET_MODE_MASK (result_mode) >> orig_count);
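  /* E.g. a QImode LSHIFTRT by 2 that was widened to SImode must be masked
     with 0xff >> 2 == 0x3f, keeping clear both the bits above QImode and
     the bits the original shift discarded.  */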
  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      int_result_mode = as_a <scalar_int_mode> (result_mode);

      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, int_result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
      else
	x = simplify_gen_binary (outer_op, int_result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop,
			     gen_int_shift_amount (GET_MODE (varop), count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);

  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't recognized, there is the possibility that we previously
     had an insn that clobbered some register as a side effect, but the
     combined insn doesn't need to do that.  So try once more without the
     clobbers unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets, these will be killed by followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }
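  /* For example, on targets where arithmetic clobbers a condition-code
     register, recog may ask for one extra clobber when matching a lone
     (set ...); the SET is then wrapped in a PARALLEL together with
     (clobber (reg:CC ...)), and the combination is kept only if that
     register is dead here.  */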
  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */

static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      scalar_int_mode mode, inner_mode;
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
	continue;
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (inner_mode) - size - start;

	  if (start != 0)
	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
				  gen_int_shift_amount (inner_mode, start));
	  else
	    x = XEXP (x, 0);

	  if (mode != inner_mode)
	    {
	      if (REG_P (x) && HARD_REGISTER_P (x)
		  && !can_change_dest_mode (x, 0, mode))
		continue;

	      x = gen_lowpart_SUBREG (mode, x);
	    }
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
	       && !paradoxical_subreg_p (XEXP (x, 0))
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = SUBREG_REG (XEXP (x, 0));
	  if (GET_MODE (x) != mode)
	    {
	      if (REG_P (x) && HARD_REGISTER_P (x)
		  && !can_change_dest_mode (x, 0, mode))
		continue;

	      x = gen_lowpart_SUBREG (mode, x);
	    }
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0))
	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      if (!(GET_CODE (x) == LSHIFTRT
	    && CONST_INT_P (XEXP (x, 1))
	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
	{
	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
	}

      SUBST (**iter, x);
      changed = true;
    }
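  /* For example, (zero_extend:SI (subreg:QI (reg:SI 90) 0)) is rewritten
     by the loop above as (and:SI (reg:SI 90) (const_int 255)), and a
     ZERO_EXTRACT of the high bits becomes an LSHIFTRT followed by the
     same kind of masking.  */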
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  scalar_int_mode mode;
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
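      /* E.g. writing an 8-bit field at offset 4 of a 32-bit register
	 keeps (and REG 0xfffff00f), shifts the source left by 4, masks
	 it with 0xff0, and IORs the two halves back together.  */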
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   otherwise -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    {
      /* For an unrecognized single set of a constant, try placing it in
	 the constant pool, if this function already uses one.  */
      rtx src = SET_SRC (pat);
      if (CONSTANT_P (src)
	  && !CONST_INT_P (src)
	  && crtl->uses_const_pool)
	{
	  machine_mode mode = GET_MODE (src);
	  if (mode == VOIDmode)
	    mode = GET_MODE (SET_DEST (pat));
	  src = force_const_mem (mode, src);
	  if (src)
	    {
	      SUBST (SET_SRC (pat), src);
	      changed = true;
	    }
	}
      else
	changed = change_zero_ext (pat);
    }
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

  if (insn_code_number < 0)
    undo_to_marker (marker);

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */

static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
      && ! (CONST_SCALAR_INT_P (x)
	    || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (paradoxical_subreg_p (omode, imode))
	return gen_rtx_SUBREG (omode, x, 0);

      poly_int64 offset = byte_lowpart_offset (omode, imode);
      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x)
	   && SCALAR_INT_MODE_P (imode)
	   && SCALAR_INT_MODE_P (omode))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode).require ();
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */

static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  scalar_int_mode int_mode;
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
      && (nonzero_bits (op0, int_mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }
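  /* For example, if OP0 is known to be (and X 8), then (ge (and X 8) 8)
     is the same as (ne (and X 8) 0): the single bit is either set or the
     value is zero.  */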
  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }
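  /* E.g. if OP0 is known to be 0 or -1, (eq OP0 -1) becomes (ne OP0 0)
     and (ne OP0 -1) becomes (eq OP0 0).  */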
  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == ((HOST_WIDE_INT_1U
			<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == (HOST_WIDE_INT_1U
		       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  scalar_int_mode mode, inner_mode, tmode;
  opt_scalar_int_mode tmode_iter;

  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
	  && (is_a <scalar_int_mode>
	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (mode)
		  - GET_MODE_PRECISION (inner_mode))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}

      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}

      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					GET_MODE (SUBREG_REG (inner_op0)))) == 0
	      && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					GET_MODE (SUBREG_REG (inner_op1)))) == 0)
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    FOR_EACH_MODE_UNTIL (tmode,
				 as_a <scalar_int_mode> (GET_MODE (op0)))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (! changed)
	    break;
	}

      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
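      /* E.g. (lt (not A) (not B)) becomes (gt A B), since NOT reverses
	 the signed order; (eq (neg A) (neg B)) likewise becomes
	 (eq A B).  */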
      else
	break;
    }

  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */

  while (CONST_INT_P (op1))
    {
      machine_mode raw_mode = GET_MODE (op0);
      scalar_int_mode int_mode;
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (raw_mode) != MODE_INT
	  && ! (raw_mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, raw_mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p
	  && is_a <scalar_int_mode> (raw_mode, &int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	op0 = force_to_mode (op0, int_mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (int_mode) - 1),
			     0);

      if (COMPARISON_P (op0))
	{
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */
	  if (const_op != 0
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;
	}

      if (raw_mode == VOIDmode)
	break;
      scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* Fall through.  */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;

	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;

	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
		      == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;

	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && UINTVAL (XEXP (op0, 1)) < mode_width)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be
		  all 0s or all 1s.

	       2. The mode extension results in the sign bit being copied on
		  each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
					 &inner_mode)
	      && GET_MODE_PRECISION (inner_mode) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		       < HOST_WIDE_INT_1U << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && (((nonzero_bits (a, inner_mode)
			 & ~GET_MODE_MASK (mode)) == 0
			&& c1 >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		       < HOST_WIDE_INT_1U << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }

	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (paradoxical_subreg_p (op0))
	    ;
	  else if (subreg_lowpart_p (op0)
		   && GET_MODE_CLASS (mode) == MODE_INT
		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
		   && (code == NE || code == EQ)
		   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
		   && !paradoxical_subreg_p (op0)
		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		       & ~GET_MODE_MASK (mode)) == 0)
	    {
	      /* Remove outer subregs that don't do anything.  */
	      tem = gen_lowpart (inner_mode, op1);

	      if ((nonzero_bits (tem, inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
		{
		  op0 = SUBREG_REG (op0);
		  op1 = tem;
		  continue;
		}
	      break;
	    }
	  else
	    break;

	  /* Fall through.  */

	case ZERO_EXTEND:
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (MINUS, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;

	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (PLUS, mode,
						   XEXP (op0, 1), op1)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (MINUS, mode,
						   XEXP (op0, 0), op1)) != 0)
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;

	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (XOR, mode,
						   XEXP (op0, 1), op1)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;

	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }
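	  /* I.e. (eq (and (ashift 1 X) Y) 0) tests bit X of Y, so it is
	     rewritten as (eq (and (lshiftrt Y X) 1) 0), a form that is
	     later turned into a ZERO_EXTRACT.  */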
	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }

	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	       (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	       -> (ne:DI (reg:SI 4) (const_int 0))

	     the transformation is invalid unless TARGET_TRULY_NOOP_TRUNCATION
	     allows it or the register is known to hold a value of the
	     required mode.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && int_mode_for_size (i, 1).exists (&tmode))
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }

	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if ((is_a <scalar_int_mode>
		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND no longer masks the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }

	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }

	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			       << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;

	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* Fall through.  */
	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op = (unsigned HOST_WIDE_INT) const_op
			      << INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
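	  /* E.g. in SImode, (ne (lshiftrt X 31) 0) just tests the sign
	     bit, so it becomes (lt X 0).  */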
	  break;

	default:
	  break;
	}

      break;
    }

  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	the upper bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
	In that case we know those bits are zeros or ones.  We must also be
	sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && is_int_mode (GET_MODE (op0), &mode)
      && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (inner_mode, op1);
	    }
	}
      else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
	{
	  tem = gen_lowpart (inner_mode, op1);

	  if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }

  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
	tmode = tmode_iter.require ();
	if (!HWI_COMPUTABLE_MODE_P (tmode))
	  break;
	if (have_insn_for (COMPARE, tmode))
	  {
	    int zero_extended;

	    /* If this is a test for negative, we can make an explicit
	       test of the sign bit.  Test this first so we can use
	       a paradoxical subreg to extend OP0.  */

	    if (op1 == const0_rtx && (code == LT || code == GE)
		&& HWI_COMPUTABLE_MODE_P (mode))
	      {
		unsigned HOST_WIDE_INT sign
		  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode, op0),
					   gen_int_mode (sign, tmode));
		code = (code == LT) ? NE : EQ;
		break;
	      }
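	    /* E.g. a QImode (lt X 0) on a target without QImode compares
	       can be tested in the wider mode as
	       (ne (and:SI (subreg:SI X) (const_int 0x80)) (const_int 0)).  */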
	    /* If the only nonzero bits in OP0 and OP1 are those in the
	       narrower mode and this is an equality or unsigned comparison,
	       we can use the wider mode.  Similarly for sign-extended
	       values, in which case it is true for all comparisons.  */
	    zero_extended = ((code == EQ || code == NE
			      || code == GEU || code == GTU
			      || code == LEU || code == LTU)
			     && (nonzero_bits (op0, tmode)
				 & ~GET_MODE_MASK (mode)) == 0
			     && ((CONST_INT_P (op1)
				  || (nonzero_bits (op1, tmode)
				      & ~GET_MODE_MASK (mode)) == 0)));

	    if (zero_extended
		|| ((num_sign_bit_copies (op0, tmode)
		     > (unsigned int) (GET_MODE_PRECISION (tmode)
				       - GET_MODE_PRECISION (mode)))
		    && (num_sign_bit_copies (op1, tmode)
			> (unsigned int) (GET_MODE_PRECISION (tmode)
					  - GET_MODE_PRECISION (mode)))))
	      {
		/* If OP0 is an AND and we don't have an AND in MODE either,
		   make a new AND in the proper mode.  */
		if (GET_CODE (op0) == AND
		    && !have_insn_for (AND, mode))
		  op0 = simplify_gen_binary (AND, tmode,
					     gen_lowpart (tmode,
							  XEXP (op0, 0)),
					     gen_lowpart (tmode,
							  XEXP (op0, 1)));

		if (zero_extended)
		  {
		    op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op1, mode);
		  }
		else
		  {
		    op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op1, mode);
		  }
		break;
	      }
	  }
      }

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x == XEXP (x0, 0)
			     ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.cc  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpressions we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.  */

      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > param_max_last_value_rtl)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
	rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle a SUBREG if it's
	 the low part, but we must be careful with paradoxical SUBREGs on
	 RISC architectures because we cannot strip e.g. an extension around
	 a load and record the naked load since the RTL middle-end considers
	 that the upper bits are defined according to LOAD_EXTEND_OP.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
			    BITS_PER_WORD)
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      WORD_REGISTER_OPERATIONS
			      && word_register_operation_p (SET_SRC (setter))
			      && paradoxical_subreg_p (SET_DEST (setter))
			      ? SET_SRC (setter)
			      : gen_lowpart (GET_MODE (dest),
					     SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      HARD_REG_SET callee_clobbers
	= insn_callee_abi (insn).full_and_partial_reg_clobbers ();
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  /* ??? We could try to preserve some information from the last
	     set of register I if the call doesn't actually clobber
	     (reg:last_set_mode I), which might be true for ABIs with
	     partial clobbers.  However, it would be difficult to
	     update last_set_nonzero_bits and last_sign_bit_copies
	     to account for the part of I that actually was clobbered.
	     It wouldn't help much anyway, since we rarely see this
	     situation before RA.  */
	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (insn, record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (insn, record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (!HWI_COMPUTABLE_MODE_P (mode))
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno
	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
	{
	  links = links->next;
	  continue;
	}

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
	{
	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
	}

      if (REG_P (SET_SRC (set)))
	{
	  regno = REGNO (SET_SRC (set));
	  links = LOG_LINKS (insn);
	}
      else
	break;
    }
}
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (!partial_subreg_p (mode, truncated))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
	return true;

      truncated_mode = GET_MODE (x);
      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
	return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turn TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
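/* Usage sketch (an assumption added for illustration; see the combiner's
   main loop for the real call site): the callback is meant to be driven by
   note_uses over an insn's pattern, e.g.

     note_uses (&PATTERN (insn), record_truncated_values, NULL);

   so that every use, including uses buried inside SUBREGs, is visited.  */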
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
	switch (format[i])
	  {
	  case 'e':
	    check_promoted_subreg (insn, XEXP (x, i));
	    break;
	  case 'V':
	  case 'E':
	    if (XVEC (x, i) != 0)
	      for (j = 0; j < XVECLEN (x, i); j++)
		check_promoted_subreg (insn, XVECEXP (x, i, j));
	    break;
	  }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
	{
	  reg_stat_type *rsp = &reg_stat[j];
	  if (rsp->last_set_invalid
	      /* If this is a pseudo-register that was only set once and not
		 live at the beginning of the function, it is always valid.  */
	      || (! (regno >= FIRST_PSEUDO_REGISTER
		     && regno < reg_n_sets_max
		     && REG_N_SETS (regno) == 1
		     && (!REGNO_REG_SET_P
			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
			  regno)))
		  && rsp->last_set_label > tick))
	    {
	      if (replace)
		*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
	      return replace;
	    }
	}

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  /* Check for identical subexpressions.  If x contains
	     identical subexpressions we only have to traverse one of
	     them.  */
	  if (i == 1 && ARITHMETIC_P (x))
	    {
	      /* Note that at this point x0 has already been checked
		 and found valid.  */
	      rtx x0 = XEXP (x, 0);
	      rtx x1 = XEXP (x, 1);

	      /* If x0 and x1 are identical then x is also valid.  */
	      if (x0 == x1)
		return 1;

	      /* If x1 is identical to a subexpression of x0 then
		 while checking x0, x1 has already been checked.  Thus
		 it is valid, and so is x.  */
	      if (ARITHMETIC_P (x0)
		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
		return 1;

	      /* If x0 is identical to a subexpression of x1 then x is
		 valid iff the rest of x1 is valid.  */
	      if (ARITHMETIC_P (x1)
		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
		return
		  get_last_value_validate (&XEXP (x1,
						  x0 == XEXP (x1, 0) ? 1 : 0),
					   insn, tick, replace);
	    }

	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
				       replace) == 0)
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (get_last_value_validate (&XVECEXP (x, i, j),
				       insn, tick, replace) == 0)
	    return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
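/* For illustration (added): with REPLACE nonzero, an invalid register
   inside *LOC is overwritten in place, so e.g.
   (ashiftrt (reg 100) (const_int 31)) may become
   (ashiftrt (clobber (const_int 0)) (const_int 31)); the shape still
   tells us the value is either -1 or 0 even though its source is gone.  */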
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
	  && (regno < FIRST_PSEUDO_REGISTER
	      || regno >= reg_n_sets_max
	      || REG_N_SETS (regno) != 1
	      || REGNO_REG_SET_P
		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
		GET_MODE_PRECISION (GET_MODE (x))))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
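/* For illustration (added): given (subreg:HI (reg:SI 100) 0) with a known
   SImode value for register 100, the SUBREG case at the top returns
   gen_lowpart (HImode, value); a paradoxical subreg instead returns 0,
   since the extra bits cannot be predicted.  */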
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
rtx reg_dead_reg;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}

/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);
  reg_dead_reg = reg;

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
	  return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
	    return 1;

	  note_stores (insn, reg_dead_at_p_1, NULL);
	  if (reg_dead_flag)
	    return reg_dead_flag == 1 ? 1 : 0;

	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
	    return 1;
	}

      if (insn == BB_HEAD (block))
	break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
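/* For illustration (added): scanning backwards, a (clobber (reg X)) sets
   reg_dead_flag to 1 (dead) while a (set (reg X) ...) sets it to -1
   (live), via reg_dead_at_p_1; if neither is seen, the basic block's
   live-in set decides.  */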
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
	 address as used.  */
      if (MEM_P (XEXP (x, 0)))
	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* None of this applies to the stack, frame or arg pointers.  */
	  if (regno == STACK_POINTER_REGNUM
	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
		  && regno == HARD_FRAME_POINTER_REGNUM)
	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
	      || regno == FRAME_POINTER_REGNUM)
	    return;

	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
	}
      return;

    case SET:
      {
	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
	   the address.  */
	rtx testreg = SET_DEST (x);

	while (GET_CODE (testreg) == SUBREG
	       || GET_CODE (testreg) == ZERO_EXTRACT
	       || GET_CODE (testreg) == STRICT_LOW_PART)
	  testreg = XEXP (testreg, 0);

	if (MEM_P (testreg))
	  mark_used_regs_combine (XEXP (testreg, 0));

	mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  mark_used_regs_combine (XEXP (x, i));
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      mark_used_regs_combine (XVECEXP (x, i, j));
	  }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

static rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
	     rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* If we do not know where the register died, it may still die between
	 FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
      if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
	{
	  rtx_insn *insn = prev_real_nondebug_insn (to_insn);
	  while (insn
		 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
		 && DF_INSN_LUID (insn) >= from_luid)
	    {
	      if (dead_or_set_regno_p (insn, regno))
		{
		  if (find_regno_note (insn, REG_DEAD, regno))
		    where_dead = insn;
		  break;
		}

	      insn = prev_real_nondebug_insn (insn);
	    }
	}

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
	  && ! reg_referenced_p (x, maybe_kill_insn))
	return;

      if (where_dead
	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
	  && DF_INSN_LUID (where_dead) >= from_luid
	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
	{
	  rtx note = remove_death (regno, where_dead);

	  /* It is possible for the call above to return 0.  This can occur
	     when last_death points to I2 or I1 that we combined with.
	     In that case make a new note.

	     We must also check for the case where X is a hard register
	     and NOTE is a death note for a range of hard registers
	     including X.  In that case, we must put REG_DEAD notes for
	     the remaining registers in place of NOTE.  */

	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
	      && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
	    {
	      unsigned int deadregno = REGNO (XEXP (note, 0));
	      unsigned int deadend = END_REGNO (XEXP (note, 0));
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i;

	      for (i = deadregno; i < deadend; i++)
		if (i < regno || i >= ourend)
		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
	    }

	  /* If we didn't find any note, or if we found a REG_DEAD note that
	     covers only part of the given reg, and we have a multi-reg hard
	     register, then to be safe we must check for REG_DEAD notes
	     for each register other than the first.  They could have
	     their own REG_DEAD notes lying around.  */
	  else if ((note == 0
		    || (note != 0
			&& partial_subreg_p (GET_MODE (XEXP (note, 0)),
					     GET_MODE (x))))
		   && regno < FIRST_PSEUDO_REGISTER
		   && REG_NREGS (x) > 1)
	    {
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i, offset;
	      rtx oldnotes = 0;

	      if (note)
		offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
	      else
		offset = 1;

	      for (i = regno + offset; i < ourend; i++)
		move_deaths (regno_reg_rtx[i],
			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
	    }

	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
	    {
	      XEXP (note, 1) = *pnotes;
	      *pnotes = note;
	    }
	  else
	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
	}

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	 that accesses one word of a multi-word item, some
	 piece of every register in the expression is used by
	 this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
	  || GET_CODE (dest) == STRICT_LOW_PART
	  || (GET_CODE (dest) == SUBREG
	      && !read_modify_subreg_p (dest)))
	{
	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
	  return;
	}

      /* If this is some other SUBREG, we know it replaces the entire
	 value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
	dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
	 For a REG (the only other possibility), the entire value is
	 being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
		     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
			 to_insn, pnotes);
	}
      else if (fmt[i] == 'e')
	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target = 0;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));
      else
	return 0;

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))
	return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
	return 1;

  return 0;
}
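/* For illustration (added): with BODY =
   (set (zero_extract (reg:SI 100) (const_int 8) (const_int 0)) ...),
   reg_bitfield_target_p returns 1 for X = (reg:SI 100), since the store
   writes only a bit-field of X rather than replacing it.  */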
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;

	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;

	case REG_ARGS_SIZE:
	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
	     entire adjustment.  Assert i3 contains at least some adjust.  */
	  if (!noop_move_p (i3))
	    {
	      poly_int64 old_size, args_size = get_args_size (note);
	      /* fixup_args_size_notes looks at REG_NORETURN note,
		 so ensure the note is placed there first.  */
	      if (CALL_P (i3))
		{
		  rtx *np;
		  for (np = &next_note; *np; np = &XEXP (*np, 1))
		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
		      {
			rtx n = *np;
			*np = XEXP (n, 1);
			XEXP (n, 1) = REG_NOTES (i3);
			REG_NOTES (i3) = n;
			break;
		      }
		}
	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
	      gcc_assert (maybe_ne (old_size, args_size)
			  || (CALL_P (i3)
			      && !ACCUMULATE_OUTGOING_ARGS
			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
	    }
	  break;

	case REG_NORETURN:
	case REG_SETJMP:
	case REG_TM:
	case REG_CALL_DECL:
	case REG_UNTYPED_CALL:
	case REG_CALL_NOCF_CHECK:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;

	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for a register which was both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered between FROM_INSN and I3,
	     we should not create a note for it.  */
	  if (reg_set_between_p (XEXP (note, 0), from_insn, i3))
	    break;

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }

	  /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
	     but we can't tell which at this point.  We must reset any
	     expectations we had about the value that was previously
	     stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
	     and, if appropriate, restore its previous value, but we
	     don't have enough information for that at this point.  */
	  else
	    {
	      record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

	      /* Otherwise, if this register is now referenced in i2
		 then the register used to be modified in one of the
		 original insns.  If it was i3 (say, in an unused
		 parallel), it's now completely gone, so the note can
		 be discarded.  But if it was modified in i2, i1 or i0
		 and we still reference it in i2, then we're
		 referencing the previous value, and since the
		 register was modified and REG_UNUSED, we know that
		 the previous value is now dead.  So, if we only
		 reference the register in i2, we change the note to
		 REG_DEAD, to reflect the previous value.  However, if
		 we're also setting or clobbering the register as
		 scratch, we know (because the register was not
		 referenced in i3) that it's unused, just as it was
		 unused before, and we place the note in i2.  */
	      if (from_insn != i3 && i2 && INSN_P (i2)
		  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		{
		  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
		    PUT_REG_NOTE_KIND (note, REG_DEAD);
		  if (! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i2, REG_NOTE_KIND (note),
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i2, REG_NOTE_KIND (note),
					  XEXP (note, 0))))
		    place = i2;
		}
	    }
	  break;

	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;

	case REG_INC:
	  /* These notes say something about how a register is used.  They must
	     be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;

	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	      place = 0;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;

	      place2 = 0;
	    }
	  break;

	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;

	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */

	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem_insn = from_insn;
	  else
	    {
	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
		{
		  /* If the new I2 sets the same register that is marked
		     dead in the note, we do not in general know where to
		     put the note.  One important case we _can_ handle is
		     when the note comes from I3.  */
		  if (from_insn == i3)
		    place = i3;
		  else
		    break;
		}
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;

	      tem_insn = i3;
	    }

	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
		{
		  if (!NONDEBUG_INSN_P (tem_insn))
		    {
		      if (tem_insn == BB_HEAD (bb))
			break;
		      continue;
		    }

		  /* If the register is being set at TEM_INSN, see if that is all
		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
		    {
		      rtx set = single_set (tem_insn);
		      rtx inner_dest = 0;

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest))
			{
			  /* Move the notes and links of TEM_INSN elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem_insn);

			  PATTERN (tem_insn) = pc_rtx;
			  REG_NOTES (tem_insn) = NULL;

			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem_insn));

			  unsigned int regno = REGNO (XEXP (note, 0));
			  reg_stat_type *rsp = &reg_stat[regno];
			  if (rsp->last_set == tem_insn)
			    record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

			  SET_INSN_DELETED (tem_insn);
			  if (tem_insn == i2)
			    i2 = NULL;
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem_insn, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem_insn;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
			   || (CALL_P (tem_insn)
			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
		    {
		      place = tem_insn;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  struct insn_link *links = LOG_LINKS (place);
			  LOG_LINKS (place) = NULL;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem_insn == BB_HEAD (bb))
		    break;
		}
	    }

	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not whether it is totally
	     replaced, which is what `dead_or_set_p' checks, so also check
	     for it being set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = &reg_stat[regno];

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.]  */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;

	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */

	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
		{
		  unsigned int endregno = END_REGNO (XEXP (note, 0));
		  bool all_used = true;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, PATTERN (place))
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      {
			all_used = false;
			break;
		      }

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs (i, reg_raw_mode[i]))
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, PATTERN (place))
				   && ! find_regno_fusage (place, USE, i))
			    for (tem_insn = PREV_INSN (place); ;
				 tem_insn = PREV_INSN (tem_insn))
			      {
				if (!NONDEBUG_INSN_P (tem_insn))
				  {
				    if (tem_insn == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem_insn, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem_insn)))
				  {
				    add_reg_note (tem_insn, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;

	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;

	  /* Set added_notes_insn to the earliest insn we added a note to.  */
	  if (added_notes_insn == 0
	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
	    added_notes_insn = place;
	}

      if (place2)
	{
	  add_shallow_copy_of_reg_note (place2, note);

	  /* Set added_notes_insn to the earliest insn we added a note to.  */
	  if (added_notes_insn == 0
	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
	    added_notes_insn = place2;
	}
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
	continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
	set = pat;
      else if (GET_CODE (pat) == PARALLEL)
	{
	  int i;
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      set = XVECEXP (pat, 0, i);
	      if (GET_CODE (set) != SET)
		continue;

	      reg = SET_DEST (set);
	      while (GET_CODE (reg) == ZERO_EXTRACT
		     || GET_CODE (reg) == STRICT_LOW_PART
		     || GET_CODE (reg) == SUBREG)
		reg = XEXP (reg, 0);

	      if (!REG_P (reg))
		continue;

	      if (REGNO (reg) == link->regno)
		break;
	    }
	  if (i == XVECLEN (pat, 0))
	    continue;
	}
      else
	continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART
	     || GET_CODE (reg) == SUBREG)
	reg = XEXP (reg, 0);

      if (!REG_P (reg))
	continue;

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  struct insn_link *link2;

	  FOR_EACH_LOG_LINK (link2, place)
	    if (link2->insn == link->insn && link2->regno == link->regno)
	      break;

	  if (link2 == NULL)
	    {
	      link->next = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
	  && !reg_mentioned_p (x, expr))
	return true;
    }
  return false;
}
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because
   the reg-to-reg copy can usefully combine with later instructions, but we
   do not want to combine the hard reg into later instructions, for that
   restricts register allocation.  */
static void
make_more_copies (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  rtx set = single_set (insn);
	  if (!set)
	    continue;

	  rtx dest = SET_DEST (set);
	  if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
	    continue;

	  rtx src = SET_SRC (set);
	  if (!(REG_P (src) && HARD_REGISTER_P (src)))
	    continue;
	  if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
	    continue;

	  rtx new_reg = gen_reg_rtx (GET_MODE (dest));
	  rtx_insn *new_insn = gen_move_insn (new_reg, src);
	  SET_SRC (set) = new_reg;
	  emit_insn_before (new_insn, insn);
	  df_insn_rescan (insn);
	}
    }
}
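/* For illustration (added): an argument copy such as

     (set (reg:SI 100) (reg:SI 0))       ;; hard reg -> pseudo

   is split by the loop above into

     (set (reg:SI 101) (reg:SI 0))       ;; new pseudo (emitted before)
     (set (reg:SI 100) (reg:SI 101))     ;; pseudo -> pseudo

   so combine can merge the second copy into later users without
   propagating the hard register itself.  */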
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  make_more_copies ();

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  int rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
	free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}