/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.

   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking to.

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
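
/* Illustrative example (editorial addition, not part of the original
   sources): given a LOG_LINK from insn 2 back to insn 1 for reg 100,

	(insn 1 (set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4))))
	(insn 2 (set (reg:SI 101) (mult:SI (reg:SI 100) (reg:SI 102))))

   the combiner substitutes the value of reg 100 into insn 2, producing

	(set (reg:SI 101) (mult:SI (plus:SI (reg:SI 99) (const_int 4))
				   (reg:SI 102)))

   and keeps the result only if it matches some pattern in the target's
   machine description; insn 1 can then be deleted if reg 100 is dead.  */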
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				other expression

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bits copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;
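
/* Illustrative example (editorial addition): if reg_stat[100].nonzero_bits
   is 0xff, only the low 8 bits of reg 100 can ever be nonzero, so a later
   (and:SI (reg:SI 100) (const_int 255)) is redundant and can be replaced
   by reg 100 itself -- exactly the `and' with a constant case described
   in the comment above.  */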
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;

/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;
/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}
#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
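
/* Illustrative usage (editorial addition): walking the links of an insn to
   find the one that comes from INSN for register DEST, as find_single_use
   does below:

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, next)
       if (link->insn == insn && link->regno == REGNO (dest))
	 break;  */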
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};
/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */

static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		/* Duplicate usage.  */
		return NULL;
	    }
	}
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
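
/* Illustrative usage (editorial addition): a caller typically rewrites a
   piece of an insn with

     SUBST (SET_SRC (set), newval);

   which records the old value in undobuf before overwriting it; if the
   combination later fails to match, undo_all restores every location
   recorded this way, while undo_commit makes the changes permanent.  */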
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_rtx_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_rtx_costs.  */
  new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
  if (newi2pat)
    {
      new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
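
/* Worked example (editorial addition): combining two insns whose recorded
   INSN_COST is 4 each gives old_cost = 8.  If the merged pattern NEWPAT
   costs 6 and there is no NEWI2PAT, new_cost = 6 <= 8 and the combination
   is allowed; a NEWPAT costing 10 would be rejected, since both costs are
   known (> 0) and new_cost > old_cost.  */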
/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}
/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}
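
/* Illustrative example (editorial addition): for the straight-line sequence

	insn 10:  (set (reg 100) ...)
	insn 11:  (set (reg 101) (... (reg 100) ...))

   the backward walk above first records insn 11 in next_use[100]; when it
   then reaches insn 10, it creates a link (insn 10, regno 100) on
   LOG_LINKS (insn 11), which try_combine will later follow.  */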
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_rtx_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
						optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;
  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already have been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun, "insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
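
/* Worked example (editorial addition): for QImode (8 bits) with
   PREC == BITS_PER_WORD == 32, the constant 0x80 is non-negative as a
   host integer but has its QImode sign bit set, so it is rewritten as
   0x80 | ~0xff = 0xffffff80 -- the value a sign-extending load would
   actually produce.  */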
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
					   BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (mode);
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	}
    }
}
1781 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1782 optionally insns that were previously combined into I3 or that will be
1783 combined into the merger of INSN and I3. The order is PRED, PRED2,
1784 INSN, SUCC, SUCC2, I3.
1786 Return 0 if the combination is not allowed for any reason.
1788 If the combination is allowed, *PDEST will be set to the single
1789 destination of INSN and *PSRC to the single source, and this function
1793 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1794 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1795 rtx
*pdest
, rtx
*psrc
)
1802 bool all_adjacent
= true;
1803 int (*is_volatile_p
) (const_rtx
);
1809 if (next_active_insn (succ2
) != i3
)
1810 all_adjacent
= false;
1811 if (next_active_insn (succ
) != succ2
)
1812 all_adjacent
= false;
1814 else if (next_active_insn (succ
) != i3
)
1815 all_adjacent
= false;
1816 if (next_active_insn (insn
) != succ
)
1817 all_adjacent
= false;
1819 else if (next_active_insn (insn
) != i3
)
1820 all_adjacent
= false;
1822 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1823 or a PARALLEL consisting of such a SET and CLOBBERs.
1825 If INSN has CLOBBER parallel parts, ignore them for our processing.
1826 By definition, these happen during the execution of the insn. When it
1827 is merged with another insn, all bets are off. If they are, in fact,
1828 needed and aren't also supplied in I3, they may be added by
1829 recog_for_combine. Otherwise, it won't match.
1831 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1834 Get the source and destination of INSN. If more than one, can't
1837 if (GET_CODE (PATTERN (insn
)) == SET
)
1838 set
= PATTERN (insn
);
1839 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1840 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1842 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1844 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1846 switch (GET_CODE (elt
))
1848 /* This is important to combine floating point insns
1849 for the SH4 port. */
1851 /* Combining an isolated USE doesn't make sense.
1852 We depend here on combinable_i3pat to reject them. */
1853 /* The code below this loop only verifies that the inputs of
1854 the SET in INSN do not change. We call reg_set_between_p
1855 to verify that the REG in the USE does not change between
1857 If the USE in INSN was for a pseudo register, the matching
1858 insn pattern will likely match any register; combining this
1859 with any other USE would only be safe if we knew that the
1860 used registers have identical values, or if there was
1861 something to tell them apart, e.g. different modes. For
1862 now, we forgo such complicated tests and simply disallow
1863 combining of USES of pseudo registers with any other USE. */
1864 if (REG_P (XEXP (elt
, 0))
1865 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1867 rtx i3pat
= PATTERN (i3
);
1868 int i
= XVECLEN (i3pat
, 0) - 1;
1869 unsigned int regno
= REGNO (XEXP (elt
, 0));
1873 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1875 if (GET_CODE (i3elt
) == USE
1876 && REG_P (XEXP (i3elt
, 0))
1877 && (REGNO (XEXP (i3elt
, 0)) == regno
1878 ? reg_set_between_p (XEXP (elt
, 0),
1879 PREV_INSN (insn
), i3
)
1880 : regno
>= FIRST_PSEUDO_REGISTER
))
1887 /* We can ignore CLOBBERs. */
1892 /* Ignore SETs whose result isn't used but not those that
1893 have side-effects. */
1894 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1895 && insn_nothrow_p (insn
)
1896 && !side_effects_p (elt
))
1899 /* If we have already found a SET, this is a second one and
1900 so we cannot combine with this insn. */
1908 /* Anything else means we can't combine. */
1914 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1915 so don't do anything with it. */
1916 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1925 /* The simplification in expand_field_assignment may call back to
1926 get_last_value, so set safe guard here. */
1927 subst_low_luid
= DF_INSN_LUID (insn
);
1929 set
= expand_field_assignment (set
);
1930 src
= SET_SRC (set
), dest
= SET_DEST (set
);
  /* Do not eliminate a user-specified register if it appears in an
     asm input: allowing that could break the register-asm usage
     documented in the GCC manual.
     Be aware that this may cover more cases than we expect, but this
     should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;

  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
         a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
          && (find_reg_fusage (i3, USE, dest)
              || (REG_P (dest)
                  && REGNO (dest) < FIRST_PSEUDO_REGISTER
                  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
         after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
          && ((succ2
               && (reg_used_between_p (dest, succ2, i3)
                   || reg_used_between_p (dest, succ, succ2)))
              || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
              || (succ
                  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
                     that case SUCC is not in the insn stream, so use SUCC2
                     instead for this test.  */
                  && reg_used_between_p (dest, insn,
                                         succ2
                                         && INSN_UID (succ) == INSN_UID (succ2)
                                         ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
         does not use any registers whose values alter in between.  However,
         if the insns are adjacent, a use can't cross a set even though we
         think it might (this can happen for a sequence of insns each setting
         the same destination; last_set of that register might point to
         a NOTE).  If INSN has a REG_EQUIV note, the register is always
         equivalent to the memory so the substitution is valid even if there
         are intervening stores.  Also, don't move a volatile asm or
         UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
          && (((!MEM_P (src)
                || ! find_reg_note (insn, REG_EQUIV, src))
               && use_crosses_set_p (src, DF_INSN_LUID (insn)))
              || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
              || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
         change whether the life span of some REGs crosses calls or not,
         and it is a pain to update that information.
         Exception: if source is a constant, moving it later can't hurt.
         Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
         cases except for parameters, it is possible to have a register copy
         insn referencing a hard register that is not allowed to contain the
         mode being copied and which would not be valid as an operand of most
         insns.  Eliminate this problem by not combining with such an insn.

         Also, on some machines we don't want to extend the life of a hard
         register.  */

      if (REG_P (src)
          && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
               && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
              /* Don't extend the life of a hard register unless it is
                 user variable (if we have few registers) or it can't
                 fit into the desired register (meaning something special
                 is going on).
                 Also avoid substituting a return register into I3, because
                 reload can't handle a conflict with constraints of other
                 inputs.  */
              || (REGNO (src) < FIRST_PSEUDO_REGISTER
                  && !targetm.hard_regno_mode_ok (REGNO (src),
                                                  GET_MODE (src)))))
        return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;


  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
        {
          rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

          /* If the clobber represents an earlyclobber operand, we must not
             substitute an expression containing the clobbered register.
             As we do not analyze the constraint strings here, we have to
             make the conservative assumption.  However, if the register is
             a fixed hard reg, the clobber cannot represent any operand;
             we leave it up to the machine description to either accept or
             reject use-and-clobber patterns.  */
          if (!REG_P (reg)
              || REGNO (reg) >= FIRST_PSEUDO_REGISTER
              || !fixed_regs[REGNO (reg)])
            if (reg_overlap_mentioned_p (reg, src))
              return 0;
        }
  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
        return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
        return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;
  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
                  ? volatile_refs_p
                  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;
  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
          && (JUMP_P (i3)
              || reg_used_between_p (XEXP (link, 0), insn, i3)
              || (pred != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
              || (pred2 != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
              || (succ != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
              || (succ2 != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
              || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
        return 0;
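
  /* For illustration (not from the original sources): given

         insn:  (set (reg:SI 100) (mem:SI (post_inc:SI (reg:SI 101))))
         i3:    (set (reg:SI 102) (reg:SI 101))

     substituting INSN's source into I3 would separate the read of
     (reg 101) from its increment, so any REG_INC register that is still
     referenced anywhere up to and including I3 blocks the combination.  */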
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
          && ! all_adjacent)
        return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

         (set (reg:DI 101) (reg:DI 100))
         (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

         (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
                    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */

static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
                  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = x ;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
             || GET_CODE (inner_dest) == SUBREG
             || GET_CODE (inner_dest) == ZERO_EXTRACT)
        inner_dest = XEXP (inner_dest, 0);

      /* Check for the case where I3 modifies its output, as discussed
         above.  We don't want to prevent pseudos from being combined
         into the address of a MEM, so only prevent the combination if
         i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
           (!MEM_P (inner_dest)
            || rtx_equal_p (i2dest, inner_dest)
            || (i1dest && rtx_equal_p (i1dest, inner_dest))
            || (i0dest && rtx_equal_p (i0dest, inner_dest)))
           && (reg_overlap_mentioned_p (i2dest, inner_dest)
               || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
               || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

          /* This is the same test done in can_combine_p except we can't test
             all_adjacent; we don't have to, since this instruction will stay
             in place, thus we are not considering increasing the lifetime of
             INNER_DEST.

             Also, if this insn sets a function argument, combining it with
             something that might need a spill could clobber a previous
             function argument; the all_adjacent test in can_combine_p also
             checks this; here, we do a more specific test for this case.  */

          || (REG_P (inner_dest)
              && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
              && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
                                              GET_MODE (inner_dest)))
          || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
          || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
        return 0;

      /* If DEST is used in I3, it is being killed in this insn, so
         record that for later.  We have to consider paradoxical
         subregs here, since they kill the whole register, but we
         ignore partial subregs, STRICT_LOW_PART, etc.
         Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
         STACK_POINTER_REGNUM, since these are always considered to be
         live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
        subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
          && REG_P (subdest)
          && reg_referenced_p (subdest, PATTERN (i3))
          && REGNO (subdest) != FRAME_POINTER_REGNUM
          && (HARD_FRAME_POINTER_IS_FRAME_POINTER
              || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
          && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
              || (REGNO (subdest) != ARG_POINTER_REGNUM
                  || ! fixed_regs[REGNO (subdest)]))
          && REGNO (subdest) != STACK_POINTER_REGNUM)
        {
          if (*pi3dest_killed)
            return 0;

          *pi3dest_killed = subdest;
        }
    }

  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
        if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
                                i1_not_in_src, i0_not_in_src, pi3dest_killed))
          return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   or division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
                && pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
        return contains_muldiv (XEXP (x, 0))
          || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
        return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
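
/* An illustrative sketch (not part of GCC; the helper name is made up):
   pow2p_hwi above is the usual single-bit test.  A nonzero constant C is
   a power of two exactly when C & (C - 1) clears its only set bit, and
   multiplications by such constants are excluded because they are
   rewritten into shifts elsewhere.  */
#if 0 /* Example only, never compiled.  */
static int
example_pow2_p (unsigned HOST_WIDE_INT c)
{
  /* 8 -> 1 (power of two); 12 -> 0; 0 -> 0.  */
  return c != 0 && (c & (c - 1)) == 0;
}
#endif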
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (!NONDEBUG_INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
           && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
           && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
          || (HARD_REGISTER_P (dest)
              && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
              && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
    return 1;

  return 0;
}
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
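
/* An illustrative sketch (not part of GCC; the helper name is made up):
   the mask arithmetic above.  (2U << (nregs - 1)) - 1 sets the NREGS low
   bits and, unlike (1U << nregs) - 1, stays well defined for nregs == 32.
   The subsequent shifts align the stored register range with the
   return-value range starting at info->regno before clearing bits.  */
#if 0 /* Example only, never compiled.  */
static unsigned int
example_low_bits (unsigned int nregs)
{
  /* nregs == 3 -> 0x7; nregs == 32 -> 0xffffffff without overflow.  */
  return (2U << (nregs - 1)) - 1;
}
#endif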
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs --;
  do
    {
      if ((mask & 1 << nregs)
          && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
        return 1;
    } while (nregs--);
  return 0;
}
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART
         || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (targetm.hard_regno_mode_ok (regno, mode)
            && REG_NREGS (x) >= hard_regno_nregs (regno, mode));

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
          && REG_N_SETS (regno) == 1
          && !added_sets
          && !REG_USERVAR_P (x));
}
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
         && SUBREG_REG (x) == reg
         && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
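
/* For illustration (not from the original sources): with REG being
   (reg:SI 100), reg_subword_p accepts destinations such as

        (strict_low_part (subreg:HI (reg:SI 100) 0))
        (subreg:HI (reg:SI 100) 0)

   but rejects (reg:SI 100) itself, since a full-register store is not a
   subword store, and rejects anything in a non-integer mode.  */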
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
        if (BARRIER_P (insn))
          {
            if (PREV_INSN (insn))
              SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
            else
              BB_FOOTER (bb) = NEXT_INSN (insn);
            if (NEXT_INSN (insn))
              SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
          }
        else if (LABEL_P (insn))
          break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
        || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
        || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
      return false;

  return true;
}
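
/* For illustration (not from the original sources): with N == 2,

        (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
                   (set (reg:SI 102) (reg:SI 103))
                   (clobber (reg:CC 17))])

   satisfies the predicate, whereas a pattern whose third element is
   another SET, or whose CLOBBER is of (const_int 0) (the marker left
   behind by an unrecognizable pattern), does not.  */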
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
        return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
        if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
          return false;
    }

  return true;
}
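
/* For illustration (not from the original sources): the ordering check
   matters because in

        (parallel [(set (reg:SI 100) (const_int 1))
                   (set (reg:SI 101) (reg:SI 100))])

   the second SET reads the *old* value of (reg 100); all SETs of a
   PARALLEL happen in parallel.  Emitting the SETs one after the other
   would copy the new value instead, so reg_referenced_p rejects it.  */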
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that has
   been I3 passed to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
             int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Nonzero if I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;
  scalar_int_mode dest_mode, temp_mode;
  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;

  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
        return 0;

      for (i = 0; i < 4; i++)
        {
          rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
          rtx set = single_set (insn);
          rtx src;
          if (!set)
            continue;
          src = SET_SRC (set);
          if (CONSTANT_P (src))
            {
              ngood += 2;
              break;
            }
          else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
            ngood++;
          else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
                   || GET_CODE (src) == LSHIFTRT)
            nshift++;
        }

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
         are likely manipulating its value.  Ideally we'll be able to combine
         all four insns into a bitfield insertion of some kind.

         Note the source in I0 might be inside a sign/zero extension and the
         memory modes in I0 and I3 might be different.  So extract the address
         from the destination of I3 and search for it in the source of I0.

         In the event that there's a match but the source/dest do not actually
         refer to the same memory, the worst that happens is we try some
         combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
          /* Ensure the source of SET0 is a MEM, possibly buried inside
             an extension.  */
          && (GET_CODE (SET_SRC (set0)) == MEM
              || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
                   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
                  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
          && (set3 = single_set (i3))
          /* Ensure the destination of SET3 is a MEM.  */
          && GET_CODE (SET_DEST (set3)) == MEM
          /* Would it be better to extract the base address for the MEM
             in SET3 and look for that?  I don't have cases where it matters
             but I could envision such cases.  */
          && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
        ngood += 2;

      if (ngood < 2 && nshift < 2)
        return 0;
    }
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;

  combine_attempts++;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
        fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
                 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
        fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
                 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
        fprintf (dump_file, "\nTrying %d -> %d:\n",
                 INSN_UID (i2), INSN_UID (i3));
    }
  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
         below would need to check what is inside (and reg_overlap_mentioned_p
         doesn't support those codes anyway).  Don't allow those destinations;
         the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
                                    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
         which we are going to substitute into one output of I2,
         is not used within another output of I2.  We must avoid making this:
         (parallel [(set (mem (reg 69)) ...)
                    (set (reg 69) ...)])
         which is not well-defined as to order of actions.
         (Besides, reload can't handle output reloads for this.)

         The problem can also happen if the dest of I3 is a memory ref,
         if another dest in I2 is an indirect memory ref.

         Neither can this PARALLEL be an asm.  We do not allow combining
         that usually (see can_combine_p), so do not here either.  */
      bool ok = true;
      for (i = 0; ok && i < XVECLEN (p2, 0); i++)
        {
          if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
               || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
              && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
                                          SET_DEST (XVECEXP (p2, 0, i))))
            ok = false;
          else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
                   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
            ok = false;
        }

      if (ok)
        for (i = 0; i < XVECLEN (p2, 0); i++)
          if (GET_CODE (XVECEXP (p2, 0, i)) == SET
              && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
            {
              combine_merges++;

              subst_insn = i3;
              subst_low_luid = DF_INSN_LUID (i2);

              added_sets_2 = added_sets_1 = added_sets_0 = 0;
              i2src = SET_SRC (XVECEXP (p2, 0, i));
              i2dest = SET_DEST (XVECEXP (p2, 0, i));
              i2dest_killed = dead_or_set_p (i2, i2dest);

              /* Replace the dest in I2 with our dest and make the resulting
                 insn the new pattern for I3.  Then skip to where we validate
                 the pattern.  Everything was set up above.  */
              SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
              newpat = p2;
              i3_subst_into_i2 = 1;
              goto validate_replacement;
            }
    }
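
  /* For illustration (not from the original sources): the case above
     covers the divmod shape

         i2:  (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                         (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
         i3:  (set (mem:SI (reg:SI 105)) (reg:SI 100))

     by rewriting the quotient SET in I2 to store directly into the memory
     destination of I3, eliminating the register-to-memory copy.  */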
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      rtx temp_dest = SET_DEST (temp_expr);
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        {
          if (CONST_INT_P (XEXP (dest, 1))
              && CONST_INT_P (XEXP (dest, 2))
              && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
                                         &dest_mode))
            {
              width = INTVAL (XEXP (dest, 1));
              offset = INTVAL (XEXP (dest, 2));
              dest = XEXP (dest, 0);
              if (BITS_BIG_ENDIAN)
                offset = GET_MODE_PRECISION (dest_mode) - width - offset;
            }
        }
      else
        {
          if (GET_CODE (dest) == STRICT_LOW_PART)
            dest = XEXP (dest, 0);
          if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
            {
              width = GET_MODE_PRECISION (dest_mode);
              offset = 0;
            }
        }

      if (offset >= 0)
        {
          /* If this is the low part, we're done.  */
          if (subreg_lowpart_p (dest))
            ;
          /* Handle the case where inner is twice the size of outer.  */
          else if (GET_MODE_PRECISION (temp_mode)
                   == 2 * GET_MODE_PRECISION (dest_mode))
            offset += GET_MODE_PRECISION (dest_mode);
          /* Otherwise give up for now.  */
          else
            offset = -1;
        }

      if (offset >= 0)
        {
          rtx inner = SET_SRC (PATTERN (i3));
          rtx outer = SET_SRC (temp_expr);

          wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
                                   rtx_mode_t (inner, dest_mode),
                                   offset, width);

          combine_merges++;
          subst_insn = i3;
          subst_low_luid = DF_INSN_LUID (i2);
          added_sets_2 = added_sets_1 = added_sets_0 = 0;
          i2dest = temp_dest;
          i2dest_killed = dead_or_set_p (i2, i2dest);

          /* Replace the source in I2 with the new constant and make the
             resulting insn the new pattern for I3.  Then skip to where we
             validate the pattern.  Everything was set up above.  */
          SUBST (SET_SRC (temp_expr),
                 immed_wide_int_const (o, temp_mode));

          newpat = PATTERN (i2);

          /* The dest of I3 has been replaced with the dest of I2.  */
          changed_i3_dest = 1;
          goto validate_replacement;
        }
    }
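
  /* For illustration (not from the original sources): if I2 is

         (set (reg:SI 100) (const_int 0x12345678))

     and I3 stores into its low word,

         (set (subreg:HI (reg:SI 100) 0) (const_int 0x4321))

     then, with the subreg being the low part, wi::insert above merges the
     pair into one insn setting the single constant 0x12344321.  */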
  /* If we have no I1 and I2 looks like:
        (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
                   (set Y OP)])
     make up a dummy I1 that is
        (set Y OP)
     and change I2 to be
        (set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
          == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
                      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
         the same DF_INSN_LUID for value tracking.  Our fake I1 will
         never appear in the insn stream so giving it the same INSN_UID
         as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
                         XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
                         -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
             SET_DEST (PATTERN (i1)));

      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
                  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }
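
  /* For illustration (not from the original sources): this turns

         i2:  (parallel [(set (reg:CC 17)
                              (compare:CC (plus:SI (reg:SI 100) (const_int -1))
                                          (const_int 0)))
                         (set (reg:SI 100)
                              (plus:SI (reg:SI 100) (const_int -1)))])

     into a fake I1 performing the decrement and a new I2 comparing the
     decremented register against zero, which later recognition can fold
     back into a branch-and-decrement insn.  */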
  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
         the same DF_INSN_LUID for value tracking.  Our fake I1 will
         never appear in the insn stream so giving it the same INSN_UID
         as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
                         XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
                         -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
  /* Verify that I2 and I1 are valid for combining.  */
  if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
      || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
                                 &i1dest, &i1src))
      || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
                                 &i0dest, &i0src)))
    {
      undo_all ();
      return 0;
    }
  /* Record whether I2DEST is used in I2SRC and similarly for the other
     cases.  Knowing this will help in register status updating below.  */
  i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
  i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
  i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
  i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
  i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
  i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
  i2dest_killed = dead_or_set_p (i2, i2dest);
  i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
  i0dest_killed = i0 && dead_or_set_p (i0, i0dest);

  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
                          : (!reg_overlap_mentioned_p (i1dest, i0dest)
                             && reg_overlap_mentioned_p (i0dest, i2src))));
  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
                          i1 && i2dest_in_i1src && !i1_feeds_i2_n,
                          i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
                                 || (i1dest_in_i0src && !i0_feeds_i1_n)),
                          &i3dest_killed))
    {
      undo_all ();
      return 0;
    }
3040 reject a combination that is, since it must be slower. Be conservative
3042 if (GET_CODE (i2src
) == MULT
3043 || (i1
!= 0 && GET_CODE (i1src
) == MULT
)
3044 || (i0
!= 0 && GET_CODE (i0src
) == MULT
)
3045 || (GET_CODE (PATTERN (i3
)) == SET
3046 && GET_CODE (SET_SRC (PATTERN (i3
))) == MULT
))
  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like

        mov r3,(r3)+

     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
        && REG_P (SET_SRC (PATTERN (i3)))
        && MEM_P (SET_DEST (PATTERN (i3)))
        && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
            || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
    if (AUTO_INC_DEC)
      for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == REG_INC
            && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
                || (i1 != 0
                    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
          {
            undo_all ();
            return 0;
          }
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
                     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
                     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
                     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
                         && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;
  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
          || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
          || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }
  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
        i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
        i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
        i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
        i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
        i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
        i0pat = copy_rtx (PATTERN (i0));
    }
  combine_merges++;

  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;
  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
      scalar_int_mode mode;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
          && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
                                            &cc_use_insn)))
        {
          compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
          if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
            compare_code = simplify_compare_const (compare_code, mode,
                                                   op0, &op1);
          target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
        }

      /* Do the rest only if op1 is const0_rtx, which may be the
         result of simplification.  */
      if (op1 == const0_rtx)
        {
          /* If a single use of the CC is found, prepare to modify it
             when SELECT_CC_MODE returns a new CC-class mode, or when
             the above simplify_compare_const() returned a new comparison
             operator.  undobuf.other_insn is assigned the CC use insn
             when modifying it.  */
          if (cc_use_loc)
            {
#ifdef SELECT_CC_MODE
              machine_mode new_mode
                = SELECT_CC_MODE (compare_code, op0, op1);
              if (new_mode != orig_compare_mode
                  && can_change_dest_mode (SET_DEST (newpat),
                                           added_sets_2, new_mode))
                {
                  unsigned int regno = REGNO (newpat_dest);
                  compare_mode = new_mode;
                  if (regno < FIRST_PSEUDO_REGISTER)
                    newpat_dest = gen_rtx_REG (compare_mode, regno);
                  else
                    {
                      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
                      newpat_dest = regno_reg_rtx[regno];
                    }
                }
#endif
              /* Cases for modifying the CC-using comparison.  */
              if (compare_code != orig_compare_code
                  /* ??? Do we need to verify the zero rtx?  */
                  && XEXP (*cc_use_loc, 1) == const0_rtx)
                {
                  /* Replace cc_use_loc with entire new RTX.  */
                  SUBST (*cc_use_loc,
                         gen_rtx_fmt_ee (compare_code, compare_mode,
                                         newpat_dest, const0_rtx));
                  undobuf.other_insn = cc_use_insn;
                }
              else if (compare_mode != orig_compare_mode)
                {
                  /* Just replace the CC reg with a new mode.  */
                  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
                  undobuf.other_insn = cc_use_insn;
                }
            }

          /* Now we modify the current newpat:
             First, SET_DEST(newpat) is updated if the CC mode has been
             altered.  For targets without SELECT_CC_MODE, this should be
             optimized away.  */
          if (compare_mode != orig_compare_mode)
            SUBST (SET_DEST (newpat), newpat_dest);
          /* This is always done to propagate i2src into newpat.  */
          SUBST (SET_SRC (newpat),
                 gen_rtx_COMPARE (compare_mode, op0, op1));
          /* Create new version of i2pat if needed; the below PARALLEL
             creation needs this to work correctly.  */
          if (! rtx_equal_p (i2src, op0))
            i2pat = gen_rtx_SET (i2dest, op0);
          i2_is_used = 1;
        }
    }
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
         an unneeded operation, such as a ZERO_EXTEND of something
         that is known to have the high part zero.  Handle that case
         by letting subst look at the inner insns.

         Another way to do this would be to have a function that tries
         to simplify a single insn instead of merging two or more
         insns.  We don't do this because of the potential of infinite
         loops and because of the potential extra memory required.
         However, doing it the way we are is a bit of a kludge and
         doesn't catch all cases.

         But only do this if -fexpensive-optimizations since it slows
         things down and doesn't usually win.

         This is not done in the COMPARE case above because the
         unmodified I2PAT is used in the PARALLEL and so a pattern
         with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
        {
          /* Pass pc_rtx so no substitutions are done, just
             simplifications.  */
          if (i1)
            {
              subst_low_luid = DF_INSN_LUID (i1);
              i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
            }

          subst_low_luid = DF_INSN_LUID (i2);
          i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
        }

      n_occurrences = 0;                /* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);

      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
         copy of I2SRC each time we substitute it, in order to avoid creating
         self-referential RTL when we will be substituting I1SRC for I1DEST
         later.  Likewise if I0 feeds into I2, either directly or indirectly
         through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
                      (i1_feeds_i2_n && i1dest_in_i1src)
                      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
                          && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Check that an autoincrement side-effect on I1 has not been lost.
         This happens if I1DEST is mentioned in I2 and dies there, and
         has disappeared from the new pattern.  */
      if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
           && i1_feeds_i2_n
           && dead_or_set_p (i2, i1dest)
           && !reg_overlap_mentioned_p (i1dest, newpat))
          /* Before we can do this substitution, we must redo the test done
             above (see detailed comments there) that ensures I1DEST isn't
             mentioned in any SETs in NEWPAT that are field assignments.  */
          || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
                                0, 0, 0))
        {
          undo_all ();
          return 0;
        }

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
         for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
        i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
         copy of I1SRC each time we substitute it, in order to avoid creating
         self-referential RTL when we will be substituting I0SRC for I0DEST
         later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
                      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
           && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
               || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
           && !reg_overlap_mentioned_p (i0dest, newpat))
          || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
                                0, 0, 0))
        {
          undo_all ();
          return 0;
        }

      /* If the following substitution will modify I0SRC, make a copy of it
         for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
        i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
                           || (i0_feeds_i2_n)))
        i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
          && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
              > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
          && (n_occurrences + added_sets_0
              + (added_sets_1 && i0_feeds_i1_n)
              + (added_sets_2 && i0_feeds_i2_n)
              > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
         at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
          && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
      combine_extras++;

      if (GET_CODE (newpat) == PARALLEL)
        {
          rtvec old = XVEC (newpat, 0);
          total_sets = XVECLEN (newpat, 0) + extra_sets;
          newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
          memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
                  sizeof (old->elem[0]) * old->num_elem);
        }
      else
        {
          rtx old = newpat;
          total_sets = 1 + extra_sets;
          newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
          XVECEXP (newpat, 0, 0) = old;
        }

      if (added_sets_0)
        XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
        {
          rtx t = i1pat;
          if (i0_feeds_i1_n)
            t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

          XVECEXP (newpat, 0, --total_sets) = t;
        }
      if (added_sets_2)
        {
          rtx t = i2pat;
          if (i1_feeds_i2_n)
            t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
                       i0_feeds_i1_n && i0dest_in_i0src);
          if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
            t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

          XVECEXP (newpat, 0, --total_sets) = t;
        }
    }
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
        RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }

  /* We have recognized nothing yet.  */
  insn_code_number = -1;
  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.

     If the remaining SET came from I2 its destination should not be used
     between I2 and I3.  See PR82024.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
            && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
           || (GET_CODE (SET_DEST (set1)) == SUBREG
               && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
          && insn_nothrow_p (i3)
          && !side_effects_p (SET_SRC (set1)))
        {
          newpat = set0;
          insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
        }

      else if (((REG_P (SET_DEST (set0))
                 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
                || (GET_CODE (SET_DEST (set0)) == SUBREG
                    && find_reg_note (i3, REG_UNUSED,
                                      SUBREG_REG (SET_DEST (set0)))))
               && insn_nothrow_p (i3)
               && !side_effects_p (SET_SRC (set0)))
        {
          rtx dest = SET_DEST (set1);
          if (GET_CODE (dest) == SUBREG)
            dest = SUBREG_REG (dest);
          if (!reg_used_between_p (dest, i2, i3))
            {
              newpat = set1;
              insn_code_number = recog_for_combine (&newpat, i3,
                                                    &new_i3_notes);

              if (insn_code_number >= 0)
                changed_i3_dest = 1;
            }
        }

      if (insn_code_number < 0)
        newpat = oldpat;
    }
  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, *split;
      rtx_insn *m_split_insn;

      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
         use I2DEST as a scratch register will help.  In the latter case,
         convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split_insn = combine_split_insns (newpat, i3);

      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
         inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
         possible to try that as a scratch reg.  This would require adding
         more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
        {
          machine_mode new_mode = GET_MODE (SET_DEST (newpat));

          /* ??? Reusing i2dest without resetting the reg_stat entry for it
             (temporarily, until we are committed to this instruction
             combination) does not work: for example, any call to nonzero_bits
             on the register (from a splitter in the MD file, for example)
             will get the old information, which is invalid.

             Since nowadays we can create registers during combine just fine,
             we should just create a new one here, not reuse i2dest.  */

          /* First try to split using the original register as a
             scratch register.  */
          parallel = gen_rtx_PARALLEL (VOIDmode,
                                       gen_rtvec (2, newpat,
                                                  gen_rtx_CLOBBER (VOIDmode,
                                                                   i2dest)));
          m_split_insn = combine_split_insns (parallel, i3);

          /* If that didn't work, try changing the mode of I2DEST if
             we can.  */
          if (m_split_insn == 0
              && new_mode != GET_MODE (i2dest)
              && new_mode != VOIDmode
              && can_change_dest_mode (i2dest, added_sets_2, new_mode))
            {
              machine_mode old_mode = GET_MODE (i2dest);
              rtx ni2dest;

              if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
                ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
              else
                {
                  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
                  ni2dest = regno_reg_rtx[REGNO (i2dest)];
                }

              parallel = (gen_rtx_PARALLEL
                          (VOIDmode,
                           gen_rtvec (2, newpat,
                                      gen_rtx_CLOBBER (VOIDmode,
                                                       ni2dest))));
              m_split_insn = combine_split_insns (parallel, i3);

              if (m_split_insn == 0
                  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
                {
                  struct undo *buf;

                  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
                  buf = undobuf.undos;
                  undobuf.undos = buf->next;
                  buf->next = undobuf.frees;
                  undobuf.frees = buf;
                }
            }

          i2scratch = m_split_insn != 0;
        }

      /* If recog_for_combine has discarded clobbers, try to use them
         again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
        {
          parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
          m_split_insn = combine_split_insns (parallel, i3);
        }

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
        {
          rtx m_split_pat = PATTERN (m_split_insn);
          insn_code_number = recog_for_combine (&m_split_pat, i3,
                                                &new_i3_notes);
          if (insn_code_number >= 0)
            newpat = m_split_pat;
        }
      else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
               && (next_nonnote_nondebug_insn (i2) == i3
                   || ! use_crosses_set_p (PATTERN (m_split_insn),
                                           DF_INSN_LUID (i2))))
        {
          rtx i2set, i3set;
          rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
          newi2pat = PATTERN (m_split_insn);

          i3set = single_set (NEXT_INSN (m_split_insn));
          i2set = single_set (m_split_insn);

          i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

          /* If I2 or I3 has multiple SETs, we won't know how to track
             register status, so don't use these insns.  If I2's destination
             is used between I2 and I3, we also can't use these insns.  */

          if (i2_code_number >= 0 && i2set && i3set
              && (next_nonnote_nondebug_insn (i2) == i3
                  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
            insn_code_number = recog_for_combine (&newi3pat, i3,
                                                  &new_i3_notes);
          if (insn_code_number >= 0)
            newpat = newi3pat;

          /* It is possible that both insns now set the destination of I3.
             If so, we must show an extra use of it.  */

          if (insn_code_number >= 0)
            {
              rtx new_i3_dest = SET_DEST (i3set);
              rtx new_i2_dest = SET_DEST (i2set);

              while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
                     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
                     || GET_CODE (new_i3_dest) == SUBREG)
                new_i3_dest = XEXP (new_i3_dest, 0);

              while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
                     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
                     || GET_CODE (new_i2_dest) == SUBREG)
                new_i2_dest = XEXP (new_i2_dest, 0);

              if (REG_P (new_i3_dest)
                  && REG_P (new_i2_dest)
                  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
                  && REGNO (new_i2_dest) < reg_n_sets_max)
                INC_REG_N_SETS (REGNO (new_i2_dest), 1);
            }
        }
3698 /* If we can split it and use I2DEST, go ahead and see if that
3699 helps things be recognized. Verify that none of the registers
3700 are set between I2 and I3. */
3701 if (insn_code_number
< 0
3702 && (split
= find_split_point (&newpat
, i3
, false)) != 0
3703 && (!HAVE_cc0
|| REG_P (i2dest
))
3704 /* We need I2DEST in the proper mode. If it is a hard register
3705 or the only use of a pseudo, we can change its mode.
3706 Make sure we don't change a hard register to have a mode that
3707 isn't valid for it, or change the number of registers. */
3708 && (GET_MODE (*split
) == GET_MODE (i2dest
)
3709 || GET_MODE (*split
) == VOIDmode
3710 || can_change_dest_mode (i2dest
, added_sets_2
,
3712 && (next_nonnote_nondebug_insn (i2
) == i3
3713 || ! use_crosses_set_p (*split
, DF_INSN_LUID (i2
)))
3714 /* We can't overwrite I2DEST if its value is still used by
3716 && ! reg_referenced_p (i2dest
, newpat
))
3718 rtx newdest
= i2dest
;
3719 enum rtx_code split_code
= GET_CODE (*split
);
3720 machine_mode split_mode
= GET_MODE (*split
);
3721 bool subst_done
= false;
3722 newi2pat
= NULL_RTX
;
3726 /* *SPLIT may be part of I2SRC, so make sure we have the
3727 original expression around for later debug processing.
3728 We should not need I2SRC any more in other cases. */
3729 if (MAY_HAVE_DEBUG_INSNS
)
3730 i2src
= copy_rtx (i2src
);
3734 /* Get NEWDEST as a register in the proper mode. We have already
3735 validated that we can do this. */
3736 if (GET_MODE (i2dest
) != split_mode
&& split_mode
!= VOIDmode
)
3738 if (REGNO (i2dest
) < FIRST_PSEUDO_REGISTER
)
3739 newdest
= gen_rtx_REG (split_mode
, REGNO (i2dest
));
3742 SUBST_MODE (regno_reg_rtx
[REGNO (i2dest
)], split_mode
);
3743 newdest
= regno_reg_rtx
[REGNO (i2dest
)];
	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
	  if (split_code == PLUS
	      && GET_CODE (XEXP (*split, 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
	    {
	      rtx nsplit = XEXP (*split, 0);
	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
						       XEXP (nsplit, 0),
						       GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }
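
	  /* For example (an invented sketch): an address-style multiply

		 (mult (reg X) (const_int 8))

	     is rewritten here as

		 (ashift (reg X) (const_int 3))

	     since exact_log2 (8) == 3.  */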
#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif

	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
		      || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }

	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }
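
	  /* Worked examples of the identity splits above (invented register
	     names): "r3 = r1 + r1" splits as "r2 = r1; r3 = r2 + r2", and
	     "((a + b) + a) + b" splits as "t = a + b; ... t + t", so each
	     of the two resulting insns is simple enough to recognize.  */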
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}
    }

  /* Check for a case where we loaded from memory in a narrow mode and
     then sign extended it, but we need both registers.  In that case,
     we have a PARALLEL with both loads from the same memory location.
     We can split this into a load from memory followed by a
     register-register copy.  This saves at least one insn, more if
     register allocation can eliminate the copy.

     We cannot do this if the destination of the first assignment is a
     condition code register or cc0.  We eliminate this case by making
     sure the SET_DEST and SET_SRC have the same mode.

     We cannot do this if the destination of the second assignment is
     a register that we have already assumed is zero-extended.  Similarly
     for a SUBREG of such a register.  */

  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				   DF_INSN_LUID (i2))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		 (REG_P (temp_expr)
		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
		      != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))
    {
      rtx ni2dest;

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
	swap_i2i3 = 1;
    }
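
  /* Shape of the transformation above (illustrative): the PARALLEL

	 (parallel [(set (reg A) (sign_extend (mem M)))
		    (set (reg B) (mem M))])

     becomes I2: (set (reg A) (sign_extend (mem M))) followed by
     I3: (set (reg B) <lowpart of (reg A)>), so the second load from
     memory is replaced by a register-register copy.  */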
  /* Similarly, check for a case where we have a PARALLEL of two
     independent SETs but we started with three insns.  In this case,
     we can do the sets as two separate insns.  This case occurs when
     some SET allows two other insns to combine, but the destination of
     that SET is still live.

     Also do this if we started with two insns and (at least) one of the
     resulting sets is a noop; this noop will be deleted later.  */

  else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
		  || set_noop_p (XVECEXP (newpat, 0, 1)))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);

      /* Normally, it doesn't matter which of the two is done first,
	 but the one that references cc0 can't be the second, and
	 one which uses any regs/memory set in between i2 and i3 can't
	 be first.  The PARALLEL might also have been pre-existing in i3,
	 so we need to make sure that we won't wrongly hoist a SET to i2
	 that would conflict with a death note present in there.  */
      if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
	  && !(REG_P (SET_DEST (set1))
	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i2, REG_DEAD,
				 SUBREG_REG (SET_DEST (set1))))
	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	  /* If I3 is a jump, ensure that set0 is a jump so that
	     we do not create invalid RTL.  */
	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	{
	  newi2pat = set1;
	  newpat = set0;
	}
      else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
	       && !(REG_P (SET_DEST (set0))
		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i2, REG_DEAD,
				      SUBREG_REG (SET_DEST (set0))))
	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
	       /* If I3 is a jump, ensure that set1 is a jump so that
		  we do not create invalid RTL.  */
	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	{
	  newi2pat = set0;
	  newpat = set1;
	}
      else
	{
	  undo_all ();
	  return 0;
	}

      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	{
	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}
    }

  /* If it still isn't recognized, fail and change things back the way
     they were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }

  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat,
					     undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  if (HAVE_cc0)
    {
      rtx_insn *p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))
	{
	  undo_all ();
	  return 0;
	}
    }
  /* Only allow this combination if insn_rtx_costs reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }

  if (MAY_HAVE_DEBUG_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    gcc_assert (last);
		    if (DF_INSN_LUID (last)
			< DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }
  /* If we will be able to accept this, we have made a
     change to the destination of I3.  This requires us to
     do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }

  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn)))
	      /* Simply drop equal notes since they may no longer be
		 valid for other_insn.  It may be possible to record
		 that the CC register is changed and only discard those
		 notes, but in practice it's an unnecessary complication
		 and doesn't give any meaningful improvement.  */
	      || REG_NOTE_KIND (note) == REG_EQUAL
	      || REG_NOTE_KIND (note) == REG_EQUIV)
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }

  if (swap_i2i3)
    {
      rtx_insn *insn;
      struct insn_link *link;
      rtx ni2dest;

      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);

      /* We need a LOG_LINK from I3 to I2.  But we used to have one,
	 so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We must remove this link.
	 The simplest way to remove the link is to point it at I1,
	 which we know will be a NOTE.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      if (GET_CODE (newi2pat) == PARALLEL)
	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
      else
	ni2dest = SET_DEST (newi2pat);

      for (insn = NEXT_INSN (i3);
	   insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		    || insn != BB_HEAD (this_basic_block->next_bb));
	   insn = NEXT_INSN (insn))
	if (NONDEBUG_INSN_P (insn)
	    && reg_referenced_p (ni2dest, PATTERN (insn)))
	  {
	    FOR_EACH_LOG_LINK (link, insn)
	      if (link->insn == i3)
		link->insn = i1;

	    break;
	  }
    }
  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;
    /* Compute which registers we expect to eliminate.  newi2pat may be
       setting either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing REG_DEAD
       notes for i2 and i3, in which case it does matter if newi2pat sets
       i1dest or not.

       Local information is used when distributing the REG_DEAD note for
       i1, in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151: if we have a four-insn combination

	   i0: r0 <- i0src
	   i1: r1 <- i1src (using r0)
		     REG_DEAD (r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)
	   ix: using r0

       then from i1's point of view r0 is eliminated no matter whether it
       is set by newi2pat or not.  In other words, the REG_DEAD info for
       r0 in i1 should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);

    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);

    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything that is
       shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;
    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
	     link = XEXP (link, 1))
	  {
	    if (substed_i2)
	      {
		/* I2SRC must still be meaningful at this point.  Some
		   splitting operations can invalidate I2SRC, but those
		   operations do not apply to calls.  */
		gcc_assert (i2src);
		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						       i2dest, i2src);
	      }
	    if (substed_i1)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i1dest, i1src);
	    if (substed_i0)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i0dest, i0src);
	  }
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;

    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	struct insn_link *link;

	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = 0;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;

    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }

    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }
    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);

    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);

    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we
       passed I3 in that case, it might delete I2.  Similarly for I2 and
       I1.  Show an additional death due to the REG_DEAD note we make
       here.  If we discard it in distribute_notes, we will decrement it
       again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);
    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, things
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }

    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }

    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }
    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }

  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }

  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  if (GET_CODE (PATTERN (i3)) == TRAP_IF
      && XEXP (PATTERN (i3), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (i3);
      gcc_assert (bb);
      remove_edge (split_block (bb, i3));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  if (undobuf.other_insn
      && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
      && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
      gcc_assert (bb);
      remove_edge (split_block (bb, undobuf.other_insn));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  combine_successes++;
  undo_commit ();

  if (added_links_insn
      && (newi2pat == 0
	  || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
      && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
    return added_links_insn;
  else
    return newi2pat ? i2 : i3;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}

/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
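
/* Usage sketch for the undo machinery above (not part of the original
   flow; ACCEPTABLE stands in for whatever validity test applies):

       void *marker = get_undo_marker ();
       SUBST (XEXP (x, 0), new_rtx);
       if (!ACCEPTABLE)
	 undo_to_marker (marker);

   undo_all () is simply undo_to_marker (0), unwinding every recorded
   change, while undo_commit () keeps them all.  */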
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;
  scalar_int_mode mode, inner_mode;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */
	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */
	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;
    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */
      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
				     &inner_mode)
	  && HWI_COMPUTABLE_MODE_P (inner_mode)
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (inner_mode))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  unsigned HOST_WIDE_INT mask
	    = (HOST_WIDE_INT_1U << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, inner_mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, inner_mode,
					  simplify_gen_binary (AND, inner_mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
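
      /* Concrete instance of the rewrite above (illustrative, assuming
	 little-endian bit numbering): storing 3 into a 2-bit field at
	 position 5,

	     (set (zero_extract (reg X) (const_int 2) (const_int 5))
		  (const_int 3))

	 becomes, since the source fills the whole field,

	     (set (reg X) (ior (reg X) (const_int 96)))

	 where 96 == 3 << 5.  */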
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */
	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;
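
	  /* Example (illustrative): if (set (reg Y) (and (reg X)
	     (const_int 4096))) is used only in (ne (reg Y) (const_int 0)),
	     the AND is replaced by an extraction of the single bit
	     exact_log2 (4096) == 12 of (reg X), avoiding the large
	     constant.  */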
	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit
	     of X is known to be on, this can be converted into a NEG of a
	     shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && 1 <= (pos = exact_log2
		       (nonzero_bits (XEXP (SET_SRC (x), 0),
				      GET_MODE (XEXP (SET_SRC (x), 0))))))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));

	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    GEN_INT (pos))));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;
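
	  /* E.g. (illustrative): if only bit 3 of (reg X) can be nonzero
	     and STORE_FLAG_VALUE is -1, (ne (reg X) (const_int 0)) becomes

		 (neg (lshiftrt (reg X) (const_int 3)))

	     which evaluates to 0 or -1, exactly the store-flag values.  */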
	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (!is_int_mode (GET_MODE (inner), &inner_mode)
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (inner_mode);
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
				      &inner_mode)
	      && CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (inner_mode) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}
      if (len && pos >= 0
	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
	  && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
	{
	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= (HOST_WIDE_INT_1U << len) - 1;
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner),
				   GEN_INT (pos)),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      GEN_INT (GET_MODE_PRECISION (mode)
					       - len - pos)),
		      GEN_INT (GET_MODE_PRECISION (mode) - len)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
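
      /* E.g. (illustrative): an unsigned 4-bit field at position 8 uses
	 the first form, (and (lshiftrt X (const_int 8)) (const_int 15)),
	 while a signed or wider field uses the ashift/shiftrt pair.  */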
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;
    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;
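
      /* The first rewrite above is just De Morgan's law in RTL
	 (illustrative):

	     (and (not (reg A)) (not (reg B)))
	     -> (not (ior (reg A) (reg B)))

	 giving the inner IOR as a split point on machines without a
	 NOR instruction.  */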
    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}

      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (! set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

      break;

    default:
      break;
    }
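
  /* E.g. (illustrative): with a non-power-of-two multiplier,

	 (minus (reg A) (mult (reg B) (const_int 5)))

     is retried as (plus (mult (reg B) (const_int -5)) (reg A)),
     since a multiply-add may match where a multiply-subtract
     does not.  */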
  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

/* Two expressions are equal if they are identical copies of a shared
   RTX or if they are both registers with the same register number
   and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;

  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && !targetm.modes_tieable_p (GET_MODE (to),
						   GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && (targetm.modes_tieable_p
				(GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
		      && (!HAVE_cc0
			  || ! (code == SET && i == 1
				&& XEXP (x, 0) == cc0_rtx)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG
					|| code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather
		 than an expression containing it.  This will speed things
		 up as well as prevent accidents where two CLOBBERs are
		 considered to be equal, thus producing an incorrect
		 simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new_rtx,
						GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
	return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is
     likely that further simplification will help, so loop, but limit the
     number of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */

static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
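
/* E.g. (illustrative): maybe_swap_commutative_operands rewrites
   (plus (const_int 4) (reg X)) as (plus (reg X) (const_int 4)), the
   canonical order with the constant last, using SUBST so the swap is
   recorded in undobuf and can be undone.  */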
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top
   level of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  scalar_int_mode int_mode;
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  maybe_swap_commutative_operands (x);
  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }
  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
				       (cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
				       (cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }
  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }

  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
	{
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);
	  rtx inner;

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	    std::swap (inner_op0, inner_op1);
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	    {
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
						 inner_op1);
	    }

	  if (inner)
	    return simplify_gen_binary (code, mode, other, inner);
	}
    }
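/* E.g. (plus (plus X (const_int 3)) (const_int 4)) reassociates to
   (plus X (const_int 7)), because the inner pair of constants folds.  */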
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;

    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
	  /* Don't call gen_lowpart if the inner mode
	     is VOIDmode and we cannot simplify it, as SUBREG without
	     inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;

      {
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;

	/* If op is known to have all lower bits zero, the result is zero.  */
	scalar_int_mode int_mode, int_op0_mode;
	if (!in_dest
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && (subreg_lowpart_offset (int_mode, int_op0_mode)
		== SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
	    && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
		& GET_MODE_MASK (int_mode)) == 0)
	  return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, int_mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
				   GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (HWI_COMPUTABLE_MODE_P (mode))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      break;
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;
    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
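      /* For instance, with an 8-bit field, ((X & 255) ^ 128) - 128
	 sign-extends the low byte of X: the XOR flips the sign bit of the
	 field and the subtraction restores it while filling in the high
	 bits.  The pair of shifts generated below computes that value.  */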
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (int_mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
		      == (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 gen_rtx_XOR (int_mode, XEXP (x, 0),
					      const1_rtx),
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);
      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
	    return temp;
	}

      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
	  && !side_effects_p (XEXP (x, 0)))
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;
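      /* E.g. with pow2 = 8, X - (X & -8) leaves exactly the low three bits,
	 i.e. (and X (const_int 7)) -- the remainder of X modulo 8.  */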
    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
	}
      break;
    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
				     XEXP (x, 0), i);
      break;
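      /* E.g. (udiv (ashift X (const_int 3)) (const_int 8)) becomes a
	 logical right shift of the ASHIFT by 3, which simplify_shift_const
	 can then reduce further (here to an AND masking off the high
	 three bits).  */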
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED:  case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);
	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */

	  if (in_cond)
	    ;
	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    return gen_lowpart (int_mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, int_mode,
					  gen_lowpart (int_mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    return gen_lowpart (int_mode, expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
	    }
	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE
	      && is_int_mode (mode, &int_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode)
	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	      && op1 == const0_rtx
	      && int_mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (int_mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }
	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;
    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      (HOST_WIDE_INT_1U
			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
			      - 1, 0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode, inner_mode;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;
  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  std::swap (true_rtx, false_rtx);
	}

      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
	{
	  /* If we are comparing against zero and the expression being
	     tested has only a single bit that might be nonzero, that is
	     its value when it is not equal to zero.  Similarly if it is
	     known to be -1 or 0.  */
	  if (true_code == EQ
	      && true_val == const0_rtx
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
	    {
	      false_code = EQ;
	      false_val = gen_int_mode (nzb, from_mode);
	    }
	  else if (true_code == EQ
		   && true_val == const0_rtx
		   && (num_sign_bit_copies (from, from_mode)
		       == GET_MODE_PRECISION (from_mode)))
	    {
	      false_code = EQ;
	      false_val = constm1_rtx;
	    }
	}

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }
  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;
  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }
  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
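/* E.g. (if_then_else (ge A B) A B) becomes (smax A B); the GEU/GTU and
   LEU/LTU cases likewise produce the unsigned UMAX/UMIN forms.  */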
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */
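  /* For example, with STORE_FLAG_VALUE == 1,
     (if_then_else (ne A 0) (plus Z (const_int 4)) Z) can be rewritten as
     (plus Z (mult (ne A 0) (const_int 4))): when the condition is false
     the multiply yields zero, and PLUS with zero is an identity.  */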
  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      scalar_int_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}

      if (z)
	{
	  temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (MULT, m, temp,
				      simplify_gen_binary (MULT, m, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);

	  return temp;
	}
    }
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */
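  /* E.g. if A is known to be 0 or 1, (if_then_else (ne A 0) (const_int 8)
     (const_int 0)) is just (ashift A (const_int 3)).  */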
  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
			    gen_lowpart (int_mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
      && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
	 == nonzero_bits (XEXP (cond, 0), inner_mode)
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      if (inner_mode == int_mode)
	return val;
      else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
	return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
    }

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;
6669 if (GET_CODE (dest
) == PC
&& ANY_RETURN_P (src
))
6672 /* Now that we know for sure which bits of SRC we are using, see if we can
6673 simplify the expression for the object knowing that we only need the
6676 if (GET_MODE_CLASS (mode
) == MODE_INT
&& HWI_COMPUTABLE_MODE_P (mode
))
6678 src
= force_to_mode (src
, mode
, HOST_WIDE_INT_M1U
, 0);
6679 SUBST (SET_SRC (x
), src
);
  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	{
	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
	    {
	      inner_compare = op0;
	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
	    }
	}
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}
      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else if (inner_compare
	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
	       && new_code == old_code
	       && op0 == XEXP (inner_compare, 0)
	       && op1 == XEXP (inner_compare, 1))
	compare_mode = GET_MODE (inner_compare);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is the only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif /* SELECT_CC_MODE */
      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, if we didn't previously have the same COMPARE we
	 want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machines where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */
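  /* For example, (set R:SI (subreg:SI (plus:DI A B) 0)) can become
     (set (subreg:DI R 0) (plus:DI A B)) when only the low word of the
     result matters.  */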
  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
	   / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
				       GET_MODE (SUBREG_REG (src)),
				       GET_MODE (src)))
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }
  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && partial_subreg_p (src)
      && subreg_lowpart_p (src))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
				     nonzero_bits (inner, inner_mode)))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */
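  /* For instance, when M is known to be 0 or -1,
     (if_then_else (ne M 0) A B) is (ior (and M A) (and (not M) B)):
     the all-ones mask selects A, the all-zeros mask selects B.  */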
  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
	  || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
	  == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, int_mode,
				  simplify_gen_binary (IOR, int_mode,
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }
  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
	  && CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
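/* For example, on a 32-bit target (sign_extract:SI X (const_int 8)
   (const_int 0)) expands to (ashiftrt:SI (ashift:SI X (const_int 24))
   (const_int 24)), and make_compound_operation turns such a pair of
   shifts back into the extraction.  */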
static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* fall through */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded.  If not for that, MEM's would very rarely be safe.

	 Reject modes bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
	return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
	  || !CONST_INT_P (XEXP (x, 2)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }

  /* We've rejected non-scalar operations by now.  */
  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (mode)
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
	  == 0))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
	return temp;
      else
	return x;
    }
  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when MODE is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));
    }
  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */
  modewidth = GET_MODE_PRECISION (mode);
  if (modewidth >= pos + len)
    {
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
	return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
				  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
				  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, mode,
				  simplify_shift_const (NULL_RTX, LSHIFTRT,
							mode, XEXP (x, 0),
							pos),
				  (HOST_WIDE_INT_1U << len) - 1);
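  /* E.g. an unsigned extraction of 8 bits at bit 4 becomes
     (and (lshiftrt X (const_int 4)) (const_int 255)).  */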
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */
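/* For example, (set (zero_extract X (const_int 8) (const_int 4)) Y)
   becomes a mask-and-merge sequence: X is ANDed with ~(255 << 4) to clear
   the field, Y is ANDed with 255 and shifted left by 4, and the two
   results are IORed back into X.  */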
static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
	{
	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
	  len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
	}
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
	{
	  inner = XEXP (SET_DEST (x), 0);
	  len = INTVAL (XEXP (SET_DEST (x), 1));
	  pos = XEXP (SET_DEST (x), 2);

	  /* A constant position should stay within the width of INNER.  */
	  if (CONST_INT_P (pos)
	      && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
	    break;

	  if (BITS_BIG_ENDIAN)
	    {
	      if (CONST_INT_P (pos))
		pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
			       - INTVAL (pos));
	      else if (GET_CODE (pos) == MINUS
		       && CONST_INT_P (XEXP (pos, 1))
		       && (INTVAL (XEXP (pos, 1))
			   == GET_MODE_PRECISION (GET_MODE (inner)) - len))
		/* If position is ADJUST - X, new position is X.  */
		pos = XEXP (pos, 0);
	      else
		{
		  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
		  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					     gen_int_mode (prec - len,
							   GET_MODE (pos)),
					     pos);
		}
	    }
	}

      /* A SUBREG between two modes that occupy the same numbers of words
	 can be done by moving the SUBREG to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
	       /* We need SUBREGs to compute nonzero_bits properly.  */
	       && nonzero_sign_valid
	       && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
		     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
			+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	{
	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
			   gen_lowpart
			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
			    SET_SRC (x)));
	  continue;
	}
      else
	break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
	inner = SUBREG_REG (inner);
      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
	{
	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (GET_MODE (inner)))
	    break;

	  /* Try to find an integral mode to pun with.  */
	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
	      .exists (&compute_mode))
	    break;

	  inner = gen_lowpart (compute_mode, inner);
	}

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
	break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
	break;

      /* Now compute the equivalent expression.  Make a copy of INNER
	 for the SET_DEST in case it is a MEM into which we will substitute;
	 we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
			   compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (
				      AND, compute_mode,
				      gen_lowpart (compute_mode, SET_SRC (x)),
				      mask),
				    pos);

      x = gen_rtx_SET (copy_rtx (inner),
		       simplify_gen_binary (IOR, compute_mode,
					    cleared, masked));
    }

  return x;
}
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */
7425 make_extraction (machine_mode mode
, rtx inner
, HOST_WIDE_INT pos
,
7426 rtx pos_rtx
, unsigned HOST_WIDE_INT len
, int unsignedp
,
7427 int in_dest
, int in_compare
)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  scalar_int_mode wanted_inner_mode;
  scalar_int_mode wanted_inner_reg_mode = word_mode;
  scalar_int_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;

  if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
	 consider just the QI as the memory to extract from.
	 The subreg adds or removes high bits; its mode is
	 irrelevant to the meaning of this extraction,
	 since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
	is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
	   && CONST_INT_P (XEXP (inner, 1))
	   && pos_rtx == 0 && pos == 0
	   && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
	 (ashift X (const_int C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
				 0, 0, len - INTVAL (XEXP (inner, 1)),
				 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE)
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode. For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && !paradoxical_subreg_p (tmode, inner_mode)
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0),
						  MEM_ADDR_SPACE (inner))
		      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
	 field.  If the original and current mode are the same, we need not
	 adjust the offset.  Otherwise, we do if bytes big endian.

	 If INNER is not a MEM, get a piece consisting of just the field
	 of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
	{
	  HOST_WIDE_INT offset;

	  /* POS counts from lsb, but make OFFSET count in memory order.  */
	  if (BYTES_BIG_ENDIAN)
	    offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
	  else
	    offset = pos / BITS_PER_UNIT;

	  new_rtx = adjust_address_nv (inner, tmode, offset);
	}
      else if (REG_P (inner))
	{
	  if (tmode != inner_mode)
	    {
	      /* We can't call gen_lowpart in a DEST since we
		 always want a SUBREG (see below) and it would sometimes
		 return a new hard register.  */
	      if (pos || in_dest)
		{
		  unsigned int offset
		    = subreg_offset_from_lsb (tmode, inner_mode, pos);

		  /* Avoid creating invalid subregs, for example when
		     simplifying (x>>32)&255.  */
		  if (!validate_subreg (tmode, inner_mode, inner, offset))
		    return NULL_RTX;

		  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
		}
	      else
		new_rtx = gen_lowpart (tmode, inner);
	    }
	  else
	    new_rtx = inner;
	}
      else
	new_rtx = force_to_mode (inner, tmode,
				 len >= HOST_BITS_PER_WIDE_INT
				 ? HOST_WIDE_INT_M1U
				 : (HOST_WIDE_INT_1U << len) - 1, 0);

      /* If this extraction is going into the destination of a SET,
	 make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
	return (MEM_P (new_rtx) ? new_rtx
		: (GET_CODE (new_rtx) != SUBREG
		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));

      if (mode == tmode)
	return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
					 mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of
	 sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (tmode)
	      && ((nonzero_bits (new_rtx, tmode)
		   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
		  == 0)))
	{
	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
	    return temp;
	  return temp1;
	}

      /* Otherwise, sign- or zero-extend unless we already are in the
	 proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
			     mode, new_rtx));
    }

  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* Unless INNER is not MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
	  || (pos_rtx != 0 && len != 1)))
    return 0;

  enum extraction_pattern pattern = (in_dest ? EP_insv
				     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  struct extraction_insn insn;
  if (get_best_reg_extraction_insn (&insn, pattern,
				    GET_MODE_BITSIZE (inner_mode), mode))
    {
      wanted_inner_reg_mode = insn.struct_mode.require ();
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && partial_subreg_p (extraction_mode, mode))
    extraction_mode = mode;

  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
	 natural alignment of the memory.  */
      wanted_inner_mode = smallest_int_mode_for_size (len);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
	     > GET_MODE_BITSIZE (wanted_inner_mode))
	wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
    }

  orig_pos = pos;

  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
	 BITS_BIG_ENDIAN style.  If position is constant, compute new
	 position.  Otherwise, build subtraction.
	 Note that POS is relative to the mode of the original argument.
	 If it's a MEM we need to recompute POS relative to that.
	 However, if we're extracting from (or inserting into) a register,
	 we want to recompute POS relative to wanted_inner_mode.  */
      int width = (MEM_P (inner)
		   ? GET_MODE_BITSIZE (is_mode)
		   : GET_MODE_BITSIZE (wanted_inner_mode));

      if (pos_rtx == 0)
	pos = width - len - pos;
      else
	pos_rtx
	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
			   pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
	 Note that it can only be less than 0 if !MEM_P (inner).  */
    }

  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && partial_subreg_p (wanted_inner_mode, is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      int offset = 0;

      /* The computations below will be correct if the machine is big
	 endian in both bits and bytes or little endian in bits and bytes.
	 If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
	 adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
	  && paradoxical_subreg_p (is_mode, inner_mode))
	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
		* GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
	  && is_mode != wanted_inner_mode)
	offset = (GET_MODE_SIZE (is_mode)
		  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }

  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitely truncating
	 the register unless TARGET_TRULY_NOOP_TRUNCATION.  */
      if (in_dest
	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
					     wanted_inner_mode))
	return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
	  && (pos_rtx != 0
	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
	return NULL_RTX;

      if (orig_pos < 0)
	return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
			     ? HOST_WIDE_INT_M1U
			     : (((HOST_WIDE_INT_1U << len) - 1)
				<< orig_pos),
			     0);
    }

  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.

     We dealt with constant rtxes earlier, so pos_rtx cannot
     have VOIDmode at this point.  */
  if (pos_rtx != 0
      && (GET_MODE_SIZE (pos_mode)
	  > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
				     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert extraction to cheaper one - either
	 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
	 cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
		   & ~(((unsigned HOST_WIDE_INT)
			GET_MODE_MASK (GET_MODE (pos_rtx)))
		       >> 1))
		  == 0)))
	{
	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
					  GET_MODE (pos_rtx));

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
	    temp = temp1;
	}
      pos_rtx = temp;
    }

  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
			     extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}
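/* For illustration only (an editorial sketch, not part of the original
   sources): with POS counted from the LSB, the value denoted by
   (zero_extract x len pos) and (sign_extract x len pos) can be written,
   on a two's-complement host with arithmetic right shifts and assuming
   0 < len and pos + len <= 64:

     unsigned long long
     zero_extract_value (unsigned long long x, unsigned len, unsigned pos)
     {
       return (x >> pos) & ((1ULL << len) - 1);
     }

     long long
     sign_extract_value (unsigned long long x, unsigned len, unsigned pos)
     {
       /* Move the field's top bit to bit 63, then shift back down.  */
       return ((long long) (x << (64 - pos - len))) >> (64 - len);
     }

   make_extraction builds the RTL form of these, preferring a plain
   SUBREG, MEM mode change or AND when the target allows it.  */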
/* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
   can be commuted with any other operations in X.  Return X without
   that shift if so.  */

static rtx
extract_left_shift (scalar_int_mode mode, rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
	  && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
	}
      break;

    default:
      break;
    }

  return 0;
}
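/* For illustration only (an editorial sketch, not part of the original
   sources): with COUNT == 2, extract_left_shift rewrites

     (plus (ashift X (const_int 3)) (const_int 12))
       -->  (plus (ashift X (const_int 1)) (const_int 3))

   i.e. it returns Y such that (ashift Y 2) equals the original value:
   the inner shift loses COUNT bits and the constant, whose low COUNT
   bits must be zero, is divided by (1 << COUNT).  */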
/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */

static rtx
make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
			     enum rtx_code in_code,
			     enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  scalar_int_mode inner_mode;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }

  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	  if (GET_CODE (new_rtx) == NEG)
	    {
	      new_rtx = XEXP (new_rtx, 0);
	      multval = -multval;
	    }
	  multval = trunc_int_for_mode (multval, mode);
	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
	}
      break;
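    /* For illustration only (an editorial sketch, not part of the original
       sources): inside an address, (ashift X (const_int 2)) becomes
       (mult X (const_int 4)), because indexed address expressions are
       canonically written with MULT, e.g. for C code such as

	 int get (int *base, long i) { return base[i]; }

       the address is (plus (mult (reg i) (const_int 4)) (reg base)).
       Outside addresses the shift form stays canonical.  */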
    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else if (GET_CODE (lhs) == MULT
	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  maybe_swap_commutative_operands (x);
	  return x;
	}
      break;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else if (GET_CODE (rhs) == MULT
	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  return x;
	}
    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, 1, 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
					  &inner_mode)
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, 1, 0, in_code == COMPARE);

	  /* If we narrowed the mode when dropping the subreg, then we lose.  */
	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
	    new_rtx = NULL;

	  /* If that didn't give anything, see if the AND simplifies on
	     its own.  */
	  if (!new_rtx && i >= 0)
	    {
	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
					 0, in_code == COMPARE);
	    }
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1)),
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
						 XEXP (x, 1)));
	  new_rtx = make_compound_operation (new_rtx, in_code);
	}

      /* If we are have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, 1, 0, in_code == COMPARE);
	}

      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation (XEXP (XEXP (x,
									  0),
								    0),
							      next_code),
				     XEXP (XEXP (x, 0), 1)));
	}

      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, 1, 0, 1);

      /* If the one operand is a paradoxical subreg of a register or memory and
	 the constant (limited to the smaller mode) has only zero bits where
	 the sub expression has known zero bits, this can be expressed as
	 a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
	{
	  rtx sub;

	  sub = XEXP (XEXP (x, 0), 0);
	  machine_mode sub_mode = GET_MODE (sub);
	  if ((REG_P (sub) || MEM_P (sub))
	      && GET_MODE_PRECISION (sub_mode) < mode_width)
	    {
	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
	      unsigned HOST_WIDE_INT mask;

	      /* original AND constant with all the known zero bits set */
	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
	      if ((mask & mode_mask) == mode_mask)
		{
		  new_rtx = make_compound_operation (sub, next_code);
		  new_rtx = make_extraction (mode, new_rtx, 0, 0,
					     GET_MODE_PRECISION (sub_mode),
					     1, 0, in_code == COMPARE);
		}
	    }
	}

      break;
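    /* For illustration only (an editorial sketch, not part of the original
       sources): the AND cases above recognize masking idioms such as

	 (and (lshiftrt X (const_int 8)) (const_int 255))
	   -->  (zero_extract X (const_int 8) (const_int 8))

       because 255 is one less than a power of two
       (exact_log2 (255 + 1) == 8), so the extraction has LEN == 8 and
       starts at bit 8; in C terms, (x >> 8) & 0xff is the 8-bit field
       of x at bit position 8.  */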
    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new_rtx = gen_rtx_ASHIFTRT (mode,
				      make_compound_operation (XEXP (x, 0),
							       next_code),
				      XEXP (x, 1));
	  break;
	}

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
	  && GET_CODE (lhs) == ASHIFT
	  && CONST_INT_P (XEXP (lhs, 1))
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
	  && INTVAL (XEXP (lhs, 1)) >= 0
	  && INTVAL (rhs) < mode_width)
	{
	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && CONST_INT_P (rhs)
	  && INTVAL (rhs) >= 0
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (rhs) < mode_width
	  && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
								  next_code),
				   0, NULL_RTX, mode_width - INTVAL (rhs),
				   code == LSHIFTRT, 0, in_code == COMPARE);

      break;
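    /* For illustration only (an editorial sketch, not part of the original
       sources): the (ashiftrt (ashift FOO C1) C2) case above covers the
       classic sign-extension idiom.  With a 32-bit mode:

	 (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
	   -->  (sign_extract:SI X (const_int 8) (const_int 0))

       i.e. ((x << 24) >> 24) on a signed 32-bit value extracts the low
       8 bits with sign extension; LEN is mode_width - C2 and POS is
       C2 - C1.  */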
    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */
      {
	rtx inner = SUBREG_REG (x), simplified;
	enum rtx_code subreg_code = in_code;

	/* If the SUBREG is masking of a logical right shift,
	   make an extraction.  */
	if (GET_CODE (inner) == LSHIFTRT
	    && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
	    && CONST_INT_P (XEXP (inner, 1))
	    && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
	    && subreg_lowpart_p (x))
	  {
	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
	    int width = GET_MODE_PRECISION (inner_mode)
			- INTVAL (XEXP (inner, 1));
	    if (width > mode_width)
	      width = mode_width;
	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				       width, 1, 0, in_code == COMPARE);
	    break;
	  }

	/* If in_code is COMPARE, it isn't always safe to pass it through
	   to the recursive make_compound_operation call.  */
	if (subreg_code == COMPARE
	    && (!subreg_lowpart_p (x)
		|| GET_CODE (inner) == SUBREG
		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
		   is (const_int 0), rather than
		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
		   for non-equality comparisons against 0 is not equivalent
		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
		|| (GET_CODE (inner) == AND
		    && CONST_INT_P (XEXP (inner, 1))
		    && partial_subreg_p (x)
		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
		       >= GET_MODE_BITSIZE (mode) - 1)))
	  subreg_code = SET;

	tem = make_compound_operation (inner, subreg_code);

	simplified
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (inner)
	    && partial_subreg_p (x)
	    && subreg_lowpart_p (x))
	  {
	    rtx newer
	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    /* force_to_mode can expand compounds.  If it just re-expanded
	       the compound, use gen_lowpart to convert to the desired
	       mode.  */
	    if (rtx_equal_p (newer, x)
		/* Likewise if it re-expanded the compound only partially.
		   This happens for SUBREG of ZERO_EXTRACT if they extract
		   the same number of bits.  */
		|| (GET_CODE (newer) == SUBREG
		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
		    && GET_CODE (inner) == AND
		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
	      return gen_lowpart (GET_MODE (x), tem);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */

static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  enum rtx_code next_code;
  rtx new_rtx, tem;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE || in_code == EQ ? SET : in_code);

  scalar_int_mode mode;
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
    {
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
						 &next_code);
      if (new_rtx)
	return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
					    new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
	return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	{
	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
	  SUBST (XVECEXP (x, i, j), new_rtx);
	}

  maybe_swap_commutative_operands (x);
  return x;
}
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}
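/* For illustration only (an editorial sketch, not part of the original
   sources): get_pos_from_mask (0x78, &len) returns 3 with len == 4,
   since 0x78 is binary 1111000: ctz_hwi counts the 3 trailing zeros and
   (0x78 >> 3) + 1 == 0x10 is a power of two, so exact_log2 gives 4.
   A mask like 0x6c returns -1, because 0x6c >> 2 == 0x1b is not one
   less than a power of two, i.e. its set bits are not contiguous.  */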
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */

static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && partial_subreg_p (mode, GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
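/* For illustration only (an editorial sketch, not part of the original
   sources): on a target where truncation is a no-op (taking the low 32
   bits of a 64-bit register is just a rename), this yields something
   like (subreg:SI (reg:DI R) 0); on a target where it is not, it keeps
   an explicit (truncate:SI (reg:DI R)) so later passes cannot wrongly
   assume the high bits are simply ignorable.  */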
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT nonzero;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && partial_subreg_p (mode, GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode != VOIDmode)
    mask &= GET_MODE_MASK (op_mode);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && (partial_subreg_p (x)
	  || (0 == (mask
		    & GET_MODE_MASK (GET_MODE (x))
		    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  scalar_int_mode int_mode, xmode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    /* OP_MODE is either MODE or XMODE, so it must be a scalar
       integer mode.  */
    return force_int_to_mode (x, int_mode, xmode,
			      as_a <scalar_int_mode> (op_mode),
			      mask, just_select);

  return gen_lowpart_or_truncate (mode, x);
}
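/* For illustration only (an editorial sketch, not part of the original
   sources): a typical force_to_mode simplification, with MASK == 0xff:

     force_to_mode ((and X (const_int 0xffff)), QImode, 0xff, 0)
       -->  gen_lowpart (QImode, X)

   the wider AND is redundant because only bits covered by MASK will
   ever be looked at.  A CONST_INT is simply ANDed with MASK, so
   0x1234 under MASK 0xff becomes 0x34.  */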
/* Subroutine of force_to_mode that handles cases in which both X and
   the result are scalar integers.  MODE is the mode of the result,
   XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
   is preferred for simplified versions of X.  The other arguments
   are as for force_to_mode.  */

static rtx
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
		   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
		   int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  unsigned HOST_WIDE_INT fuller_mask;
  rtx op0, op1, temp;

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);
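  /* For illustration only (an editorial sketch, not part of the original
     sources): the computation above widens MASK to cover every bit up to
     its highest set bit, since carries propagate upward.  E.g.

       mask        == 0x10   (only bit 4 wanted)
       fuller_mask == 0x1f   ((1 << (floor_log2 (0x10) + 1)) - 1)

     so for PLUS, MINUS, MULT and NEG the low four bits of the operands
     still matter even though the caller asked only for bit 4.  */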
  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));
	  xmode = op_mode;

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (xmode) != mask
	      && HWI_COMPUTABLE_MODE_P (xmode))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
				       gen_int_mode (cval, xmode));
	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */
    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
	{
	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (xmode))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       xmode);
	  temp = simplify_gen_binary (GET_CODE (x), xmode,
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* fall through */
    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	{
	  x = simplify_gen_binary (code, op_mode, op0, op1);
	  xmode = op_mode;
	}
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), op_mode,
						    mask, next_select));

      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
	  xmode = op_mode;
	}
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
	    op_mode = xmode;

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (xmode != op_mode || inner != XEXP (x, 0))
	    {
	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	      xmode = op_mode;
	    }
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (xmode))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (xmode))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
				 GEN_INT (GET_MODE_PRECISION (xmode)
					  - exact_log2 (mask + 1)));

      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (xmode, mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT nonzero;
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (xmode);
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (xmode);
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
		   GET_MODE_PRECISION (xmode) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));

    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    xmode, gen_int_mode (mask, xmode),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, xmode,
				     force_to_mode (XEXP (x, 0), xmode,
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (xmode))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode,
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
	  xmode = op_mode;
	}
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}
9084 /* Return nonzero if X is an expression that has one of two values depending on
9085 whether some other value is zero or nonzero. In that case, we return the
9086 value that is being tested, *PTRUE is set to the value if the rtx being
9087 returned has a nonzero value, and *PFALSE is set to the other alternative.
9089 If we return zero, we set *PTRUE and *PFALSE to X. */
9092 if_then_else_cond (rtx x
, rtx
*ptrue
, rtx
*pfalse
)
9094 machine_mode mode
= GET_MODE (x
);
9095 enum rtx_code code
= GET_CODE (x
);
9096 rtx cond0
, cond1
, true0
, true1
, false0
, false1
;
9097 unsigned HOST_WIDE_INT nz
;
9098 scalar_int_mode int_mode
;
9100 /* If we are comparing a value against zero, we are done. */
9101 if ((code
== NE
|| code
== EQ
)
9102 && XEXP (x
, 1) == const0_rtx
)
9104 *ptrue
= (code
== NE
) ? const_true_rtx
: const0_rtx
;
9105 *pfalse
= (code
== NE
) ? const0_rtx
: const_true_rtx
;
9109 /* If this is a unary operation whose operand has one of two values, apply
9110 our opcode to compute those values. */
9111 else if (UNARY_P (x
)
9112 && (cond0
= if_then_else_cond (XEXP (x
, 0), &true0
, &false0
)) != 0)
9114 *ptrue
= simplify_gen_unary (code
, mode
, true0
, GET_MODE (XEXP (x
, 0)));
9115 *pfalse
= simplify_gen_unary (code
, mode
, false0
,
9116 GET_MODE (XEXP (x
, 0)));
9120 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9121 make can't possibly match and would suppress other optimizations. */
9122 else if (code
== COMPARE
)
9125 /* If this is a binary operation, see if either side has only one of two
9126 values. If either one does or if both do and they are conditional on
9127 the same value, compute the new true and false values. */
9128 else if (BINARY_P (x
))
9130 rtx op0
= XEXP (x
, 0);
9131 rtx op1
= XEXP (x
, 1);
9132 cond0
= if_then_else_cond (op0
, &true0
, &false0
);
9133 cond1
= if_then_else_cond (op1
, &true1
, &false1
);
9135 if ((cond0
!= 0 && cond1
!= 0 && !rtx_equal_p (cond0
, cond1
))
9136 && (REG_P (op0
) || REG_P (op1
)))
9138 /* Try to enable a simplification by undoing work done by
9139 if_then_else_cond if it converted a REG into something more
9144 true0
= false0
= op0
;
9149 true1
= false1
= op1
;
9153 if ((cond0
!= 0 || cond1
!= 0)
9154 && ! (cond0
!= 0 && cond1
!= 0 && !rtx_equal_p (cond0
, cond1
)))
9156 /* If if_then_else_cond returned zero, then true/false are the
9157 same rtl. We must copy one of them to prevent invalid rtl
9160 true0
= copy_rtx (true0
);
9161 else if (cond1
== 0)
9162 true1
= copy_rtx (true1
);
9164 if (COMPARISON_P (x
))
9166 *ptrue
= simplify_gen_relational (code
, mode
, VOIDmode
,
9168 *pfalse
= simplify_gen_relational (code
, mode
, VOIDmode
,
9173 *ptrue
= simplify_gen_binary (code
, mode
, true0
, true1
);
9174 *pfalse
= simplify_gen_binary (code
, mode
, false0
, false1
);
9177 return cond0
? cond0
: cond1
;
9180 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9181 operands is zero when the other is nonzero, and vice-versa,
9182 and STORE_FLAG_VALUE is 1 or -1. */
9184 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
9185 && (code
== PLUS
|| code
== IOR
|| code
== XOR
|| code
== MINUS
9187 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
9189 rtx op0
= XEXP (XEXP (x
, 0), 1);
9190 rtx op1
= XEXP (XEXP (x
, 1), 1);
9192 cond0
= XEXP (XEXP (x
, 0), 0);
9193 cond1
= XEXP (XEXP (x
, 1), 0);
9195 if (COMPARISON_P (cond0
)
9196 && COMPARISON_P (cond1
)
9197 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
9198 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
9199 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
9200 || ((swap_condition (GET_CODE (cond0
))
9201 == reversed_comparison_code (cond1
, NULL
))
9202 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
9203 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
9204 && ! side_effects_p (x
))
9206 *ptrue
= simplify_gen_binary (MULT
, mode
, op0
, const_true_rtx
);
9207 *pfalse
= simplify_gen_binary (MULT
, mode
,
9209 ? simplify_gen_unary (NEG
, mode
,
9217 /* Similarly for MULT, AND and UMIN, except that for these the result
9219 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
9220 && (code
== MULT
|| code
== AND
|| code
== UMIN
)
9221 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
9223 cond0
= XEXP (XEXP (x
, 0), 0);
9224 cond1
= XEXP (XEXP (x
, 1), 0);
9226 if (COMPARISON_P (cond0
)
9227 && COMPARISON_P (cond1
)
9228 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
9229 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
9230 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
9231 || ((swap_condition (GET_CODE (cond0
))
9232 == reversed_comparison_code (cond1
, NULL
))
9233 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
9234 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
9235 && ! side_effects_p (x
))
9237 *ptrue
= *pfalse
= const0_rtx
;
  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
	 canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
	return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
	{
	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
	  return XEXP (cond0, 0);
	}
      else
	return cond0;
    }
  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
  else if (code == SUBREG
	   && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
					       &true0, &false0)))
    {
      true0 = simplify_gen_subreg (mode, true0,
				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
	{
	  *ptrue = true0;
	  *pfalse = false0;
	  return cond0;
	}
    }

  /* If X is a constant, this isn't special and will cause confusion
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
	   || (is_a <scalar_int_mode> (mode, &int_mode)
	       && (num_sign_bit_copies (x, int_mode)
		   == GET_MODE_PRECISION (int_mode))))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }
  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }
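  /* For illustration: if COND tells us (ge REG 0), then (abs REG) is just
     REG itself; if COND tells us (lt REG 0), then (abs REG) is (neg REG).  */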
  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
	{
	  std::swap (val, reg);
	  cond = swap_condition (cond);
	}

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
	{
	  if (COMPARISON_P (x))
	    {
	      if (comparison_dominates_p (cond, code))
		return const_true_rtx;

	      code = reversed_comparison_code (x, NULL);
	      if (code != UNKNOWN
		  && comparison_dominates_p (cond, code))
		return const0_rtx;
	      else
		return x;
	    }
	  else if (code == SMAX || code == SMIN
		   || code == UMIN || code == UMAX)
	    {
	      int unsignedp = (code == UMIN || code == UMAX);

	      /* Do not reverse the condition when it is NE or EQ.
		 This is because we cannot conclude anything about
		 the value of 'SMAX (x, y)' when x is not equal to y,
		 but we can when x equals y.  */
	      if ((code == SMAX || code == UMAX)
		  && ! (cond == EQ || cond == NE))
		cond = reverse_condition (cond);

	      switch (cond)
		{
		case GE:   case GT:
		  return unsignedp ? x : XEXP (x, 1);
		case LE:   case LT:
		  return unsignedp ? x : XEXP (x, 0);
		case GEU:  case GTU:
		  return unsignedp ? XEXP (x, 1) : x;
		case LEU:  case LTU:
		  return unsignedp ? XEXP (x, 0) : x;
		default:
		  break;
		}
	    }
	}
    }
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
	{
	  /* We must simplify subreg here, before we lose track of the
	     original inner_mode.  */
	  new_rtx = simplify_subreg (GET_MODE (x), r,
				     inner_mode, SUBREG_BYTE (x));
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (SUBREG_REG (x), r);
	}

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
	{
	  /* We must simplify the zero_extend here, before we lose
	     track of the original inner_mode.  */
	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					      r, inner_mode);
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (XEXP (x, 0), r);
	}

      return x;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
						cond, reg, val));
    }

  return x;
}
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
	return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return 0;
      /* For big endian, adjust the memory offset.  */
      if (BYTES_BIG_ENDIAN)
	x = adjust_address_nv (x, GET_MODE (y),
			       -subreg_lowpart_offset (GET_MODE (x),
						       GET_MODE (y)));
      else
	x = adjust_address_nv (x, GET_MODE (y), 0);
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
     above cases are true.  */
  return 0;
}
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;

  /* All the rules in this function are specific to scalar integers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return x;
  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && partial_subreg_p (XEXP (src, 0))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const1_rtx);
      return x;
    }
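  /* For illustration: clearing bit POS of DEST is DEST &= ~(1 << POS);
     since ~(1 << POS) is a rotate of the constant -2 (all ones except bit
     zero) by POS, it matches the first pattern above.  Setting bit POS is
     DEST |= (1 << POS) and matches the IOR pattern.  Either way the store
     becomes a SET of a one-bit ZERO_EXTRACT of DEST.  */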
  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = -1;
      else
	ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (dest, src);
	}
    }
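  /* For illustration: storing (and X 0xff) into a 4-bit ZERO_EXTRACT has
     ze_mask == 0xf; (0xff & 0xf) == 0xf is a complete overlap, so only X
     is stored.  With an AND mask of 0x3c instead, the partial-overlap case
     reduces the mask to 0x3c & 0xf == 0xc.  */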
  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  scalar_int_mode src_mode = mode;
  if (GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;
  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
	   && paradoxical_subreg_p (XEXP (rhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
	   && CONST_INT_P (XEXP (rhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
	   && paradoxical_subreg_p (XEXP (lhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;
  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
  if (pos < 0
      || pos + len > GET_MODE_PRECISION (mode)
      || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, mode)) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
			   ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     src_mode, other, pos),
			       dest);
  src = force_to_mode (src, new_mode,
		       len >= HOST_BITS_PER_WIDE_INT
		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
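/* For illustration: with C1 == 0xffff00ff in SImode, ~C1 selects bits
   8..15, so pos == 8 and len == 8.  If OTHER has nonzero bits only inside
   the C1 zeros, (ior (and DEST 0xffff00ff) OTHER) is rewritten above as
   storing (OTHER >> 8) into an 8-bit field of DEST at position 8.  */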
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
	return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
	return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }
  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
}
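/* For illustration, why XOR over IOR needs the special form: with a = 0,
   b = 0, c = 1, (a | c) ^ (b | c) is 0, while the naive ((a ^ b) | c)
   would be 1; the corrected factoring (a ^ b) & ~c gives 0 as required.  */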
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and A (not B)) (and B (not B))),
   which then simplifies to (and A (not B)).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */

static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);
  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
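  /* For illustration: if X is known to be 0 or 1, (neg X) is 0 or -1 (all
     ones), so ANDing it with the single bit (1 << i) yields exactly
     (ashift X i): 0 when X is 0, and (1 << i) when X is 1.  */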
  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    {
      scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
      return
	gen_lowpart
	  (mode,
	   apply_distributive_law
	   (simplify_gen_binary (GET_CODE (varop), varop_mode,
				 simplify_and_const_int (NULL_RTX, varop_mode,
							 XEXP (varop, 0),
							 constop),
				 simplify_and_const_int (NULL_RTX, varop_mode,
							 XEXP (varop, 1),
							 constop))));
    }

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
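/* For illustration, why the PLUS rule needs a low-bit mask: carries in an
   addition only propagate upward, so e.g. ((a + (b << 8)) & 0xff) equals
   (a & 0xff); the (b << 8) operand simplifies to zero under the mask and
   is dropped.  */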
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
/* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
			      scalar_int_mode mode,
			      unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
	      && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
	 of mode, record_value_for_reg invoked nonzero_bits on the register
	 with nonzero_bits_mode (because last_set_mode is necessarily integral
	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
	 are all valid, hence in mode too since nonzero_bits_mode is defined
	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
	tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);

      *nonzero &= mask;
    }

  return NULL;
}
/* Given a reg X of mode XMODE, return the number of bits at the high-order
   end of X that are known to be equal to the sign bit.  X will be used
   in mode MODE; the returned value will always be between 1 and the
   number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
				     scalar_int_mode mode,
				     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  scalar_int_mode int_mode;
  return (unsignedp
	  ? (is_a <scalar_int_mode> (mode, &int_mode)
	     && HWI_COMPUTABLE_MODE_P (int_mode)
	     ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
			       - floor_log2 (nonzero_bits (x, int_mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}
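/* For illustration: if nonzero_bits shows an SImode quantity fits in 0xff,
   floor_log2 gives 7 and the result is 32 - 1 - 7 == 24 spare high-order
   zero bits; two such values (each at most 0xff) can be added without
   overflow, matching the comment above.  */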
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
   PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
   proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */

static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
		 enum rtx_code op1, HOST_WIDE_INT const1,
		 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);
  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    ;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
	{
	case AND:
	  const0 &= const1;
	  break;
	case IOR:
	  const0 |= const1;
	  break;
	case XOR:
	  const0 ^= const1;
	  break;
	case PLUS:
	  const0 += const1;
	  break;
	case NEG:
	  op0 = UNKNOWN;
	  break;
	default:
	  break;
	}
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
	if (op1 == AND)
	  /* (a & b) | b == b  */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) | b == a | b  */
	  {;}
	break;

      case XOR:
	if (op1 == AND)
	  /* (a & b) ^ b == (~a) & b  */
	  op0 = AND, *pcomp_p = 1;
	else /* op1 == IOR */
	  /* (a | b) ^ b == a & ~b  */
	  op0 = AND, const0 = ~const0;
	break;

      case AND:
	if (op1 == IOR)
	  /* (a | b) & b == b  */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) & b == (~a) & b  */
	  *pcomp_p = 1;
	break;

      default:
	break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
	   && op0 == AND)
    op0 = UNKNOWN;

  *pop0 = op0;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  return 1;
}
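/* For illustration: merging an inner (ior ... C) into an outer (xor ... C)
   uses the (a | b) ^ b == a & ~b row of the table above; with C == 0x0f in
   QImode, the two queued operations collapse to a single (and ... 0xf0).  */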
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static scalar_int_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
		      scalar_int_mode orig_mode, scalar_int_mode mode,
		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
	 to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
	  > (unsigned) (GET_MODE_PRECISION (mode)
			- GET_MODE_PRECISION (orig_mode)))
	return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
	return mode;

      /* We can also widen if the bits brought in will be masked off.  This
	 operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
	{
	  int care_bits = low_bitmask_len (orig_mode, outer_const);

	  if (care_bits >= 0
	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
	    return mode;
	}
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
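/* For illustration: a QImode LSHIFTRT can be widened to SImode when the
   operand's SImode nonzero_bits fit in 0xff, since every bit the wider
   shift brings down from above the low byte is already known to be zero.  */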
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  machine_mode mode = result_mode;
  machine_mode shift_mode;
  scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure and truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;
10419 if (GET_CODE (varop
) == CLOBBER
)
10422 /* Convert ROTATERT to ROTATE. */
10423 if (code
== ROTATERT
)
10425 unsigned int bitsize
= GET_MODE_UNIT_PRECISION (result_mode
);
10427 count
= bitsize
- count
;
10430 shift_mode
= result_mode
;
10431 if (shift_mode
!= mode
)
10433 /* We only change the modes of scalar shifts. */
10434 int_mode
= as_a
<scalar_int_mode
> (mode
);
10435 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
10436 shift_mode
= try_widen_shift_mode (code
, varop
, count
,
10437 int_result_mode
, int_mode
,
10438 outer_op
, outer_const
);
10441 scalar_int_mode shift_unit_mode
10442 = as_a
<scalar_int_mode
> (GET_MODE_INNER (shift_mode
));
10444 /* Handle cases where the count is greater than the size of the mode
10445 minus 1. For ASHIFT, use the size minus one as the count (this can
10446 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10447 take the count modulo the size. For other shifts, the result is
10450 Since these shifts are being produced by the compiler by combining
10451 multiple operations, each of which are defined, we know what the
10452 result is supposed to be. */
10454 if (count
> (GET_MODE_PRECISION (shift_unit_mode
) - 1))
10456 if (code
== ASHIFTRT
)
10457 count
= GET_MODE_PRECISION (shift_unit_mode
) - 1;
10458 else if (code
== ROTATE
|| code
== ROTATERT
)
10459 count
%= GET_MODE_PRECISION (shift_unit_mode
);
10462 /* We can't simply return zero because there may be an
10464 varop
= const0_rtx
;
      /* If we discovered we had to complement VAROP, leave.  Making a NOT
	 here would cause an infinite loop.  */
      if (complement_p)
	break;

      if (shift_mode == shift_unit_mode)
	{
	  /* An arithmetic right shift of a quantity known to be -1 or 0
	     is a no-op.  */
	  if (code == ASHIFTRT
	      && (num_sign_bit_copies (varop, shift_unit_mode)
		  == GET_MODE_PRECISION (shift_unit_mode)))
	    {
	      count = 0;
	      break;
	    }

	  /* If we are doing an arithmetic right shift and discarding all but
	     the sign bit copies, this is equivalent to doing a shift by the
	     bitsize minus one.  Convert it into that shift because it will
	     often allow other simplifications.  */

	  if (code == ASHIFTRT
	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
		  >= GET_MODE_PRECISION (shift_unit_mode)))
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;

	  /* We simplify the tests below and elsewhere by converting
	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
	     `make_compound_operation' will convert it to an ASHIFTRT for
	     those machines (such as VAX) that don't have an LSHIFTRT.  */
	  if (code == ASHIFTRT
	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
	      && val_signbit_known_clear_p (shift_unit_mode,
					    nonzero_bits (varop,
							  shift_unit_mode)))
	    code = LSHIFTRT;

	  if (((code == LSHIFTRT
		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
	       || (code == ASHIFT
		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		   && !((nonzero_bits (varop, shift_unit_mode) << count)
			& GET_MODE_MASK (shift_unit_mode))))
	      && !side_effects_p (varop))
	    varop = const0_rtx;
	}
      switch (GET_CODE (varop))
	{
	case SIGN_EXTEND:
	case ZERO_EXTEND:
	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  new_rtx = expand_compound_operation (varop);
	  if (new_rtx != varop)
	    {
	      varop = new_rtx;
	      continue;
	    }
	  break;
	case MEM:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);

	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
	     minus the width of a smaller mode, we can do this with a
	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && ! mode_dependent_address_p (XEXP (varop, 0),
					     MEM_ADDR_SPACE (varop))
	      && ! MEM_VOLATILE_P (varop)
	      && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
		  .exists (&tmode)))
	    {
	      new_rtx = adjust_address_nv (varop, tmode,
					   BYTES_BIG_ENDIAN ? 0
					   : count / BITS_PER_UNIT);
	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
				     : ZERO_EXTEND, int_mode, new_rtx);
	      count = 0;
	      continue;
	    }
	  break;
	case SUBREG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
	     the same number of words as what we've seen so far.  Then store
	     the widest mode in MODE.  */
	  if (subreg_lowpart_p (varop)
	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
	      && (unsigned int) ((GET_MODE_SIZE (inner_mode)
				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		 == mode_words
	      && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
	    {
	      varop = SUBREG_REG (varop);
	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
		mode = inner_mode;
	      continue;
	    }
	  break;
	case MULT:
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case UDIV:
	  /* Similar, for when divides are cheaper.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;
	case ASHIFTRT:
	  /* If we are extracting just the sign bit of an arithmetic
	     right shift, that shift is not needed.  However, the sign
	     bit of a wider mode may be different from what would be
	     interpreted as the sign bit in a narrower mode, so, if
	     the result is narrower, don't discard the shift.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
	      && (GET_MODE_UNIT_BITSIZE (result_mode)
		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
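	  /* For illustration: in SImode, (lshiftrt (ashiftrt X N) 31)
	     extracts the sign bit of X regardless of N, because an
	     arithmetic right shift never changes the sign bit; the inner
	     shift is therefore dropped above.  */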
	  /* fall through */

	case LSHIFTRT:
	case ASHIFT:
	case ROTATE:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* Here we have two nested shifts.  The result is usually the
	     AND of a new shift with a mask.  We compute the result below.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && INTVAL (XEXP (varop, 1)) >= 0
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode))
	    {
	      enum rtx_code first_code = GET_CODE (varop);
	      unsigned int first_count = INTVAL (XEXP (varop, 1));
	      unsigned HOST_WIDE_INT mask;
	      rtx mask_rtx;
	      /* We have one common special case.  We can't do any merging if
		 the inner code is an ASHIFTRT of a smaller mode.  However, if
		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
		 we can convert it to
		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
		 This simplifies certain SIGN_EXTEND operations.  */
	      if (code == ASHIFT && first_code == ASHIFTRT
		  && count == (GET_MODE_PRECISION (int_result_mode)
			       - GET_MODE_PRECISION (int_varop_mode)))
		{
		  /* C3 has the low-order C1 bits zero.  */

		  mask = GET_MODE_MASK (int_mode)
			 & ~((HOST_WIDE_INT_1U << first_count) - 1);

		  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
						  XEXP (varop, 0), mask);
		  varop = simplify_shift_const (NULL_RTX, ASHIFT,
						int_result_mode, varop, count);
		  count = first_count;
		  code = ASHIFTRT;
		  continue;
		}
	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
		 than C1 high-order bits equal to the sign bit, we can convert
		 this to either an ASHIFT or an ASHIFTRT depending on the
		 two counts.

		 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */

	      if (code == ASHIFTRT && first_code == ASHIFT
		  && int_varop_mode == shift_unit_mode
		  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
		      > first_count))
		{
		  varop = XEXP (varop, 0);
		  count -= first_count;
		  if (count < 0)
		    {
		      count = -count;
		      code = LSHIFTRT;
		    }

		  continue;
		}
	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
		 we can only do this if FIRST_CODE is also ASHIFTRT.

		 We can't do the case when CODE is ROTATE and FIRST_CODE is
		 ASHIFTRT.

		 If the mode of this shift is not the mode of the outer shift,
		 we can't do this if either shift is a right shift or ROTATE.

		 Finally, we can't do any of these if the mode is too wide
		 unless the codes are the same.

		 Handle the case where the shift codes are the same
		 first.  */

	      if (code == first_code)
		{
		  if (int_varop_mode != int_result_mode
		      && (code == ASHIFTRT || code == LSHIFTRT
			  || code == ROTATE))
		    break;

		  count += first_count;
		  varop = XEXP (varop, 0);
		  continue;
		}

	      if (code == ASHIFTRT
		  || (code == ROTATE && first_code == ASHIFTRT)
		  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
		  || (int_varop_mode != int_result_mode
		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
			  || first_code == ROTATE
			  || code == ROTATE)))
		break;

	      /* To compute the mask to apply after the shift, shift the
		 nonzero bits of the inner shift the same way the
		 outer shift will.  */

	      mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
				       int_result_mode);

	      mask_rtx
		= simplify_const_binary_operation (code, int_result_mode,
						   mask_rtx, GEN_INT (count));

	      /* Give up if we can't compute an outer operation to use.  */
	      if (mask_rtx == 0
		  || !CONST_INT_P (mask_rtx)
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					int_result_mode, &complement_p))
		break;

	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
	      if ((code == ASHIFTRT || code == LSHIFTRT)
		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
		count += first_count;
	      else
		count -= first_count;

	      /* If COUNT is positive, the new shift is usually CODE,
		 except for the two exceptions below, in which case it is
		 FIRST_CODE.  If the count is negative, FIRST_CODE should
		 always be used.  */
	      if (count > 0
		  && ((first_code == ROTATE && code == ASHIFT)
		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
		code = first_code;
	      else if (count < 0)
		code = first_code, count = -count;

	      varop = XEXP (varop, 0);
	      continue;
	    }
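	  /* For illustration: in SImode, (ashift (lshiftrt X 8) 8) has
	     opposite directions, so the counts cancel to 0 and only the
	     queued outer AND remains, leaving (and X 0xffffff00).  */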
	  /* If we have (A << B << C) for any shift, we can convert this to
	     (A << C << B).  This wins if A is a constant.  Only try this if
	     B is not a constant.  */

	  else if (GET_CODE (varop) == code
		   && CONST_INT_P (XEXP (varop, 0))
		   && !CONST_INT_P (XEXP (varop, 1)))
	    {
	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
		 sure the result will be masked.  See PR70222.  */
	      if (code == LSHIFTRT
		  && int_mode != int_result_mode
		  && !merge_outer_ops (&outer_op, &outer_const, AND,
				       GET_MODE_MASK (int_result_mode)
				       >> orig_count, int_result_mode,
				       &complement_p))
		break;
	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
		 up outer sign extension (often left and right shift) is
		 hardly more efficient than the original.  See PR70429.  */
	      if (code == ASHIFTRT && int_mode != int_result_mode)
		break;

	      rtx new_rtx = simplify_const_binary_operation (code, int_mode,
							     XEXP (varop, 0),
							     GEN_INT (count));
	      varop = gen_rtx_fmt_ee (code, int_mode, new_rtx,
				      XEXP (varop, 1));
	      count = 0;
	      continue;
	    }
	  break;
	case NOT:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Make this fit the case below.  */
	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
	  continue;
	case IOR:
	case AND:
	case XOR:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have an (le X 0) operation.  If we have an arithmetic shift
	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }
	  /* If we have (shift (logical)), move the logical to the outside
	     to allow it to possibly combine with another logical and the
	     shift to combine with another shift.  This also canonicalizes to
	     what a ZERO_EXTRACT looks like.  Also, some machines have
	     (and (shift)) insns.  */

	  if (CONST_INT_P (XEXP (varop, 1))
	      /* We can't do this if we have (ashiftrt (xor)) and the
		 constant has its sign bit set in shift_unit_mode with
		 shift_unit_mode wider than result_mode.  */
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && int_result_mode != shift_unit_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_unit_mode))
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we can't do that, try to simplify the shift in each arm of the
	     logical expression, make a new logical expression, and apply
	     the inverse distributive law.  This also can't be done for
	     (ashiftrt (xor)) where we've widened the shift and the constant
	     changes the sign bit.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && int_result_mode != shift_unit_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_unit_mode)))
	    {
	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
					      XEXP (varop, 0), count);
	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
					      XEXP (varop, 1), count);

	      varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);

	      count = 0;
	      continue;
	    }
	  break;
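	  /* For illustration: (lshiftrt (and X 0xff00) 8) becomes
	     (and (lshiftrt X 8) 0xff): the AND constant is shifted the same
	     way and the logical operation is queued as the outer op.  */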
	case EQ:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
	     says that the sign bit can be tested, FOO has mode MODE, C is
	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
	     that may be nonzero.  */
	  if (code == LSHIFTRT
	      && XEXP (varop, 1) == const0_rtx
	      && GET_MODE (XEXP (varop, 0)) == int_result_mode
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
	      && STORE_FLAG_VALUE == -1
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
				  int_result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }
	  break;
	case NEG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
	     than the number of bits in the mode is equivalent to A.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
	     NEG outside to allow shifts to combine.  */
	  if (code == ASHIFT
	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
				  int_result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;
	case PLUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
	     is one less than the number of bits in the mode is
	     equivalent to (xor A 1).  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && XEXP (varop, 1) == constm1_rtx
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
				  int_result_mode, &complement_p))
	    {
	      count = 0;
	      varop = XEXP (varop, 0);
	      continue;
	    }
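	  /* For illustration: if A is 0 or 1, then A - 1 is -1 or 0, and
	     shifting it logically by precision - 1 leaves just its sign
	     bit, 1 or 0 respectively; that is exactly (xor A 1).  */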
	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and those
	     bits are known zero in FOO, we can replace the PLUS with FOO.
	     Similarly in the other operand order.  This code occurs when
	     we are computing the size of a variable-size array.  */

	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && count < HOST_BITS_PER_WIDE_INT
	      && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
	      && (nonzero_bits (XEXP (varop, 1), int_result_mode)
		  & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  else if ((code == ASHIFTRT || code == LSHIFTRT)
		   && count < HOST_BITS_PER_WIDE_INT
		   && HWI_COMPUTABLE_MODE_P (int_result_mode)
		   && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
			    >> count)
		   && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
			    & nonzero_bits (XEXP (varop, 1),
					    int_result_mode)))
	    {
	      varop = XEXP (varop, 1);
	      continue;
	    }
	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
	  if (code == ASHIFT
	      && CONST_INT_P (XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (ASHIFT, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical).  See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && CONST_INT_P (XEXP (varop, 1))
	      && mode_signbit_p (int_result_mode, XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }
	  break;
	case MINUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */

	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }
	  break;
	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);

	      varop_inner
		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
				    XEXP (varop_inner, 0),
				    GEN_INT
				    (count + INTVAL (XEXP (varop_inner, 1))));
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  shift_mode = result_mode;
  if (shift_mode != mode)
    {
      /* We only change the modes of scalar shifts.  */
      int_mode = as_a <scalar_int_mode> (mode);
      int_result_mode = as_a <scalar_int_mode> (result_mode);
      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
					 int_mode, outer_op, outer_const);
    }
  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11150 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11151 turn off all the bits that the shift would have turned off. */
11152 if (orig_code
== LSHIFTRT
&& result_mode
!= shift_mode
)
11153 /* We only change the modes of scalar shifts. */
11154 x
= simplify_and_const_int (NULL_RTX
, as_a
<scalar_int_mode
> (shift_mode
),
11155 x
, GET_MODE_MASK (result_mode
) >> orig_count
);
11157 /* Do the remainder of the processing in RESULT_MODE. */
11158 x
= gen_lowpart_or_truncate (result_mode
, x
);
  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      int_result_mode = as_a <scalar_int_mode> (result_mode);

      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, int_result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
      else
	x = simplify_gen_binary (outer_op, int_result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
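/* Note for callers of the wrapper above: passing X == NULL_RTX is the
   common idiom (see e.g. the ASHIFTRT handling in simplify_comparison);
   when simplify_shift_const_1 finds nothing better, the shift is simply
   synthesized with simplify_gen_binary and, if needed, converted to
   RESULT_MODE with gen_lowpart, so the caller always gets a usable rtx.  */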
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */
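  /* For example, a combined PARALLEL of the hypothetical shape
	 [(set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
	  (clobber (reg:CC 17))]
     that fails to match is retried below as the bare SET; should the
     matching pattern itself require clobbers, recog reports how many
     through NUM_CLOBBERS_TO_ADD and they are re-added further down.  */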
  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets; these will be killed by a follow-up pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */
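/* For instance, (zero_extract:SI (reg:SI 4) (const_int 8) (const_int 4))
   used as a source becomes, on a little-endian target,
   (and:SI (lshiftrt:SI (reg:SI 4) (const_int 4)) (const_int 0xff)),
   a formulation that many more machine patterns are able to match.  */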
static bool
change_zero_ext (rtx pat)
{
  bool changed = false;

  rtx *src = &SET_SRC (pat);
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      scalar_int_mode mode, inner_mode;
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
	continue;
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (inner_mode) - size - start;

	  if (start != 0)
	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
	  else
	    x = XEXP (x, 0);

	  if (mode != inner_mode)
	    x = gen_lowpart_SUBREG (mode, x);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
	       && !paradoxical_subreg_p (XEXP (x, 0))
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = SUBREG_REG (XEXP (x, 0));
	  if (GET_MODE (x) != mode)
	    x = gen_lowpart_SUBREG (mode, x);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0))
	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      if (!(GET_CODE (x) == LSHIFTRT
	    && CONST_INT_P (XEXP (x, 1))
	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
	{
	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
	}

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  scalar_int_mode mode;
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   otherwise -1.  */
static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
	undo_to_marker (marker);
    }

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */
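/* For example, asking for the QImode lowpart of (mem:SI ADDR) yields
   (mem:QI ADDR) on a little-endian target and (mem:QI (plus ADDR 3)) on a
   fully big-endian one (via adjust_address_nv below), while asking for a
   mode wider than the memref produces a paradoxical SUBREG instead so the
   original memref gets reloaded.  */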
static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  unsigned int osize = GET_MODE_SIZE (omode);
  unsigned int isize = GET_MODE_SIZE (imode);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
      && ! (CONST_SCALAR_INT_P (x) || isize == osize))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;

      isize = GET_MODE_SIZE (imode);
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      int offset = 0;

      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (paradoxical_subreg_p (omode, imode))
	return gen_rtx_SUBREG (omode, x, 0);

      if (WORDS_BIG_ENDIAN)
	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);

      /* Adjust the address so that the address-after-the-data is
	 unchanged.  */
      if (BYTES_BIG_ENDIAN)
	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);

      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode).require ();
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */
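/* Two typical rewrites done below: if OP0 is known to have only bit 2
   possibly set, (ge OP0 (const_int 4)) becomes (ne OP0 (const_int 0));
   and a signed (lt OP0 (const_int 5)) is canonicalized to
   (le OP0 (const_int 4)).  */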
static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  scalar_int_mode int_mode;
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
      && (nonzero_bits (op0, int_mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == ((HOST_WIDE_INT_1U
			<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == (HOST_WIDE_INT_1U
		       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */
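/* As a small example of the machinery below: (gtu (minus A B) (const_int 0))
   first becomes (ne (minus A B) (const_int 0)) via simplify_compare_const,
   and the MINUS case can then rewrite it as (ne A B), returning NE with
   *POP0 and *POP1 updated to A and B.  */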
static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  scalar_int_mode mode, inner_mode, tmode;
  opt_scalar_int_mode tmode_iter;

  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
	  && (is_a <scalar_int_mode>
	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (mode)
		  - GET_MODE_PRECISION (inner_mode))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}
      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}
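      /* For example, (eq (lshiftrt A 2) (lshiftrt B 2)) becomes (eq A B)
	 when the low two bits of both A and B are known to be zero, since
	 then the shifts discard no information.  */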
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
		  <= HOST_BITS_PER_WIDE_INT)
	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					      GET_MODE (SUBREG_REG (inner_op0)))))
	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					      GET_MODE (SUBREG_REG (inner_op1))))))
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    FOR_EACH_MODE_UNTIL (tmode,
				 as_a <scalar_int_mode> (GET_MODE (op0)))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (! changed)
	    break;
	}
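      /* E.g. comparing (and (subreg:SI (reg:QI R1) 0) 0xff) against
	 (and (subreg:SI (reg:QI R2) 0) 0xff) can instead compare R1 with R2
	 directly in QImode, with the condition made unsigned because the
	 masking already cleared the sign bit.  */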
      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }
  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }
  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */

  while (CONST_INT_P (op1))
    {
      machine_mode raw_mode = GET_MODE (op0);
      scalar_int_mode int_mode;
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (raw_mode) != MODE_INT
	  && ! (raw_mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, raw_mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p
	  && is_a <scalar_int_mode> (raw_mode, &int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	op0 = force_to_mode (op0, int_mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (int_mode) - 1),
			     0);
      if (COMPARISON_P (op0))
	{
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */
	  if (const_op != 0
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;
	}

      if (raw_mode == VOIDmode)
	break;
      scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* Fall through.  */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;
	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;
	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
		      == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;
	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be
		  all 0s or all 1s.

	       2. The mode extension results in the sign bit being copied on
		  each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
					 &inner_mode)
	      && GET_MODE_PRECISION (inner_mode) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		       < HOST_WIDE_INT_1U << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && ((0 == (nonzero_bits (a, inner_mode)
			      & ~GET_MODE_MASK (mode))
			&& const_op >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		       < HOST_WIDE_INT_1U << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }
	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (paradoxical_subreg_p (op0))
	    ;
	  else if (subreg_lowpart_p (op0)
		   && GET_MODE_CLASS (mode) == MODE_INT
		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
		   && (code == NE || code == EQ)
		   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
		   && !paradoxical_subreg_p (op0)
		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		       & ~GET_MODE_MASK (mode)) == 0)
	    {
	      /* Remove outer subregs that don't do anything.  */
	      tem = gen_lowpart (inner_mode, op1);

	      if ((nonzero_bits (tem, inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
		{
		  op0 = SUBREG_REG (op0);
		  op1 = tem;
		  continue;
		}
	      break;
	    }
	  else
	    break;

	  /* FALLTHROUGH */

	case ZERO_EXTEND:
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							op1, XEXP (op0, 1))))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;
	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							XEXP (op0, 0), op1)))
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;
	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (XOR, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;
	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }

	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }

	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	     -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && int_mode_for_size (i, 1).exists (&tmode))
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }

	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if ((is_a <scalar_int_mode>
		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND does no longer mask the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }

	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }

	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			       << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;
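	  /* One concrete instance of the last AND rewrite above, with bit 3:
	     (ne (and (lshiftrt (xor X 8) 3) 1) 0) tests the inverse of bit 3
	     of X, so it becomes (eq (and (lshiftrt X 3) 1) 0).  */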
	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;
	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* FALLTHROUGH */
	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op = (unsigned HOST_WIDE_INT) const_op
			      << INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
     care bits and we can assume they have any convenient value.  So
     making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
     In this case the upper bits of op0 are undefined.  We should not make
     the simplification in that case as we do not know the contents of
     those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
     In that case we know those bits are zeros or ones.  We must also be
     sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && is_int_mode (GET_MODE (op0), &mode)
      && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (inner_mode, op1);
	    }
	}
      else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
	{
	  tem = gen_lowpart (inner_mode, op1);

	  if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }
  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
	tmode = tmode_iter.require ();
	if (!HWI_COMPUTABLE_MODE_P (tmode))
	  break;
	if (have_insn_for (COMPARE, tmode))
	  {
	    int zero_extended;

	    /* If this is a test for negative, we can make an explicit
	       test of the sign bit.  Test this first so we can use
	       a paradoxical subreg to extend OP0.  */

	    if (op1 == const0_rtx && (code == LT || code == GE)
		&& HWI_COMPUTABLE_MODE_P (mode))
	      {
		unsigned HOST_WIDE_INT sign
		  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode, op0),
					   gen_int_mode (sign, tmode));
		code = (code == LT) ? NE : EQ;
		break;
	      }

	    /* If the only nonzero bits in OP0 and OP1 are those in the
	       narrower mode and this is an equality or unsigned comparison,
	       we can use the wider mode.  Similarly for sign-extended
	       values, in which case it is true for all comparisons.  */
	    zero_extended = ((code == EQ || code == NE
			      || code == GEU || code == GTU
			      || code == LEU || code == LTU)
			     && (nonzero_bits (op0, tmode)
				 & ~GET_MODE_MASK (mode)) == 0
			     && ((CONST_INT_P (op1)
				  || (nonzero_bits (op1, tmode)
				      & ~GET_MODE_MASK (mode)) == 0)));

	    if (zero_extended
		|| ((num_sign_bit_copies (op0, tmode)
		     > (unsigned int) (GET_MODE_PRECISION (tmode)
				       - GET_MODE_PRECISION (mode)))
		    && (num_sign_bit_copies (op1, tmode)
			> (unsigned int) (GET_MODE_PRECISION (tmode)
					  - GET_MODE_PRECISION (mode)))))
	      {
		/* If OP0 is an AND and we don't have an AND in MODE either,
		   make a new AND in the proper mode.  */
		if (GET_CODE (op0) == AND
		    && !have_insn_for (AND, mode))
		  op0 = simplify_gen_binary (AND, tmode,
					     gen_lowpart (tmode,
							  XEXP (op0, 0)),
					     gen_lowpart (tmode,
							  XEXP (op0, 1)));

		if (zero_extended)
		  {
		    op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op1, mode);
		  }
		else
		  {
		    op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op1, mode);
		  }
		break;
	      }
	  }
      }
  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x == XEXP (x0, 0)
			     ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpressions we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.  */
      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
	rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}
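/* Editor's illustration (not part of the original source): after
   processing a hypothetical insn

	(set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))

   reg_stat[100].last_set_value points at the PLUS expression, so a
   later combination into an insn that uses (reg:SI 100) can consult
   the known value.  A self-referencing assignment such as
   (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1))) instead
   substitutes the previous value of the register into VALUE, or a
   CLOBBER when no useful previous value is known.  */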
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle SUBREG in
	 some cases.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      gen_lowpart (GET_MODE (dest),
					   SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
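/* Editor's illustration (not part of the original source): on a target
   whose word holds SImode, a lowpart store such as

	(set (subreg:QI (reg:SI 100) 0) (reg:QI 101))

   matches the SUBREG case above, so the value recorded for
   (reg:SI 100) is gen_lowpart (SImode, (reg:QI 101)) rather than the
   plain "value unknown" NULL_RTX.  */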
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno
	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
	{
	  links = links->next;
	  continue;
	}

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
	{
	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
	}

      if (REG_P (SET_SRC (set)))
	{
	  regno = REGNO (SET_SRC (set));
	  links = LOG_LINKS (insn);
	}
      else
	break;
    }
}
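/* Editor's note (illustrative, not from the original source): on a
   target that promotes QImode values to SImode registers, a use of
   (subreg:QI (reg:SI 100) 0) with SUBREG_PROMOTED_UNSIGNED_P set lets
   the recorded value of (reg:SI 100) be marked as having no nonzero
   bits outside GET_MODE_MASK (QImode), information that survives
   after the SUBREG itself is simplified away.  */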
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (!partial_subreg_p (mode, truncated))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
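/* Editor's illustration (not part of the original source): if
   (reg:DI 100) was last recorded as truncated to SImode in this EBB,
   then reg_truncated_to_mode (SImode, x) returns true (and, via the
   TRULY_NOOP_TRUNCATION check, possibly narrower modes do as well),
   so a TRUNCATE of the register can be rewritten as a simple lowpart
   SUBREG.  */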
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
	return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
	return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
	switch (format[i])
	  {
	  case 'e':
	    check_promoted_subreg (insn, XEXP (x, i));
	    break;
	  case 'V':
	  case 'E':
	    if (XVEC (x, i) != 0)
	      for (j = 0; j < XVECLEN (x, i); j++)
		check_promoted_subreg (insn, XVECEXP (x, i, j));
	    break;
	  }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);

      for (j = regno; j < endregno; j++)
	{
	  reg_stat_type *rsp = &reg_stat[j];
	  if (rsp->last_set_invalid
	      /* If this is a pseudo-register that was only set once and not
		 live at the beginning of the function, it is always valid.  */
	      || (! (regno >= FIRST_PSEUDO_REGISTER
		     && regno < reg_n_sets_max
		     && REG_N_SETS (regno) == 1
		     && (!REGNO_REG_SET_P
			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
			  regno)))
		  && rsp->last_set_label > tick))
	    {
	      if (replace)
		*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
	      return replace;
	    }
	}

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  /* Check for identical subexpressions.  If x contains
	     identical subexpression we only have to traverse one of
	     them.  */
	  if (i == 1 && ARITHMETIC_P (x))
	    {
	      /* Note that at this point x0 has already been checked
		 and found valid.  */
	      rtx x0 = XEXP (x, 0);
	      rtx x1 = XEXP (x, 1);

	      /* If x0 and x1 are identical then x is also valid.  */
	      if (x0 == x1)
		return 1;

	      /* If x1 is identical to a subexpression of x0 then
		 while checking x0, x1 has already been checked.  Thus
		 it is valid and so is x.  */
	      if (ARITHMETIC_P (x0)
		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
		return 1;

	      /* If x0 is identical to a subexpression of x1 then x is
		 valid iff the rest of x1 is valid.  */
	      if (ARITHMETIC_P (x1)
		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
		return
		  get_last_value_validate (&XEXP (x1,
						  x0 == XEXP (x1, 0) ? 1 : 0),
					   insn, tick, replace);
	    }

	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
				       replace) == 0)
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (get_last_value_validate (&XVECEXP (x, i, j),
				       insn, tick, replace) == 0)
	    return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
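/* Editor's illustration (not part of the original source): if
   reg_stat[101].last_set_invalid is set, validating the recorded value
   (plus:SI (reg:SI 101) (const_int 1)) with REPLACE nonzero rewrites
   it as (plus:SI (clobber:SI (const_int 0)) (const_int 1)); the shape
   of the value is kept even though the register's contents are
   stale.  */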
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
	  && (regno < FIRST_PSEUDO_REGISTER
	      || regno >= reg_n_sets_max
	      || REG_N_SETS (regno) != 1
	      || REGNO_REG_SET_P
		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (GET_MODE_PRECISION (rsp->last_set_mode)
      < GET_MODE_PRECISION (GET_MODE (x)))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
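/* Editor's note (illustrative, not from the original source):
   get_last_value ((subreg:HI (reg:SI 100) 0)) first recurses on
   (reg:SI 100) and, when a value is known, returns its HImode
   lowpart; a paradoxical SUBREG would bail out instead, because the
   "extra" bits are unpredictable.  */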
/* Return nonzero if expression X refers to a REG or to memory
   that is set in an instruction more recent than FROM_LUID.  */

static int
use_crosses_set_p (const_rtx x, int from_luid)
{
  const char *fmt;
  int i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned endreg = END_REGNO (x);

#ifdef PUSH_ROUNDING
      /* Don't allow uses of the stack pointer to be moved,
	 because we don't know whether the move crosses a push insn.  */
      if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
	return 1;
#endif
      for (; regno < endreg; regno++)
	{
	  reg_stat_type *rsp = &reg_stat[regno];
	  if (rsp->last_set
	      && rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) > from_luid)
	    return 1;
	}
      return 0;
    }

  if (code == MEM && mem_last_set > from_luid)
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && use_crosses_set_p (XEXP (x, i), from_luid))
	return 1;
    }

  return 0;
}
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
	  return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
	    return 1;

	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
	  if (reg_dead_flag)
	    return reg_dead_flag == 1 ? 1 : 0;

	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
	    return 1;
	}

      if (insn == BB_HEAD (block))
	break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
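/* Editor's note (illustrative, not from the original source): given
   the hypothetical sequence

	(insn A ... (clobber (reg:SI 1)))
	(insn B ...)

   reg_dead_at_p ((reg:SI 1), B) scans backwards and answers "dead" as
   soon as insn A's CLOBBER is seen, while an intervening SET of the
   register would make it answer "live" instead.  */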
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
      /* CC0 must die in the insn after it is set, so we don't need to take
	 special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
	 address as used.  */
      if (MEM_P (XEXP (x, 0)))
	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      regno = REGNO (x);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* None of this applies to the stack, frame or arg pointers.  */
	  if (regno == STACK_POINTER_REGNUM
	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
		  && regno == HARD_FRAME_POINTER_REGNUM)
	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
	      || regno == FRAME_POINTER_REGNUM)
	    return;

	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
	}
      return;

    case SET:
      {
	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
	   the address.  */
	rtx testreg = SET_DEST (x);

	while (GET_CODE (testreg) == SUBREG
	       || GET_CODE (testreg) == ZERO_EXTRACT
	       || GET_CODE (testreg) == STRICT_LOW_PART)
	  testreg = XEXP (testreg, 0);

	if (MEM_P (testreg))
	  mark_used_regs_combine (XEXP (testreg, 0));

	mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  mark_used_regs_combine (XEXP (x, i));
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      mark_used_regs_combine (XVECEXP (x, i, j));
	  }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
	     rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
	  && ! reg_referenced_p (x, maybe_kill_insn))
	return;

      if (where_dead
	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
	  && DF_INSN_LUID (where_dead) >= from_luid
	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
	{
	  rtx note = remove_death (regno, where_dead);

	  /* It is possible for the call above to return 0.  This can occur
	     when last_death points to I2 or I1 that we combined with.
	     In that case make a new note.

	     We must also check for the case where X is a hard register
	     and NOTE is a death note for a range of hard registers
	     including X.  In that case, we must put REG_DEAD notes for
	     the remaining registers in place of NOTE.  */

	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
	      && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
	    {
	      unsigned int deadregno = REGNO (XEXP (note, 0));
	      unsigned int deadend = END_REGNO (XEXP (note, 0));
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i;

	      for (i = deadregno; i < deadend; i++)
		if (i < regno || i >= ourend)
		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
	    }

	  /* If we didn't find any note, or if we found a REG_DEAD note that
	     covers only part of the given reg, and we have a multi-reg hard
	     register, then to be safe we must check for REG_DEAD notes
	     for each register other than the first.  They could have
	     their own REG_DEAD notes lying around.  */
	  else if ((note == 0
		    || (note != 0
			&& partial_subreg_p (GET_MODE (XEXP (note, 0)),
					     GET_MODE (x))))
		   && regno < FIRST_PSEUDO_REGISTER
		   && REG_NREGS (x) > 1)
	    {
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i, offset;
	      rtx oldnotes = 0;

	      if (note)
		offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
	      else
		offset = 1;

	      for (i = regno + offset; i < ourend; i++)
		move_deaths (regno_reg_rtx[i],
			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
	    }

	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
	    {
	      XEXP (note, 1) = *pnotes;
	      *pnotes = note;
	    }
	  else
	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
	}

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	 that accesses one word of a multi-word item, some
	 piece of every register in the expression is used by
	 this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
	  || GET_CODE (dest) == STRICT_LOW_PART
	  || (GET_CODE (dest) == SUBREG
	      && (((GET_MODE_SIZE (GET_MODE (dest))
		    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
		  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
	{
	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
	  return;
	}

      /* If this is some other SUBREG, we know it replaces the entire
	 value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
	dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
	 For a REG (the only other possibility), the entire value is
	 being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
		     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
			 to_insn, pnotes);
	}
      else if (fmt[i] == 'e')
	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target = 0;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));

      if (target == 0)
	return 0;

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))
	return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
	return 1;

  return 0;
}
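/* Editor's illustration (not part of the original source): for

	BODY = (set (zero_extract:SI (reg:SI 100)
				     (const_int 8) (const_int 0))
		    (reg:SI 101))

   reg_bitfield_target_p ((reg:SI 100), body) returns 1, since only a
   bit-field of the register is being assigned.  */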
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;

	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;

	case REG_ARGS_SIZE:
	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
	     entire adjustment.  Assert i3 contains at least some adjust.  */
	  if (!noop_move_p (i3))
	    {
	      int old_size, args_size = INTVAL (XEXP (note, 0));
	      /* fixup_args_size_notes looks at REG_NORETURN note,
		 so ensure the note is placed there first.  */
	      if (CALL_P (i3))
		{
		  rtx *np;
		  for (np = &next_note; *np; np = &XEXP (*np, 1))
		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
		      {
			rtx n = *np;
			*np = XEXP (n, 1);
			XEXP (n, 1) = REG_NOTES (i3);
			REG_NOTES (i3) = n;
			break;
		      }
		}
	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
	      gcc_assert (old_size != args_size
			  || (CALL_P (i3)
			      && !ACCUMULATE_OUTGOING_ARGS
			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
	    }
	  break;

	case REG_NORETURN:
	case REG_SETJMP:
	case REG_TM:
	case REG_CALL_DECL:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;

	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for register which were both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }
	  break;

	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;

	case REG_INC:
	  /* These notes say something about how a register is used.  They must
	     be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;

	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }
	  break;

	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;

	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */

	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem_insn = from_insn;
	  else
	    {
	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;
	      tem_insn = i3;
	      /* If the new I2 sets the same register that is marked dead
		 in the note, we do not know where to put the note.
		 Give up.  */
	      if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
		break;
	    }

	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem_insn = PREV_INSN (tem_insn); place == 0;
		   tem_insn = PREV_INSN (tem_insn))
		{
		  if (!NONDEBUG_INSN_P (tem_insn))
		    {
		      if (tem_insn == BB_HEAD (bb))
			break;
		      continue;
		    }

		  /* If the register is being set at TEM_INSN, see if that is all
		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
		    {
		      rtx set = single_set (tem_insn);
		      rtx inner_dest = 0;
		      rtx_insn *cc0_setter = NULL;

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 CC0 targets must be careful to maintain setter/user
			 pairs.  If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest)
			  && (!HAVE_cc0
			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
			{
			  /* Move the notes and links of TEM_INSN elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem_insn);

			  PATTERN (tem_insn) = pc_rtx;
			  REG_NOTES (tem_insn) = NULL;

			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem_insn));

			  unsigned int regno = REGNO (XEXP (note, 0));
			  reg_stat_type *rsp = &reg_stat[regno];
			  if (rsp->last_set == tem_insn)
			    record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

			  SET_INSN_DELETED (tem_insn);
			  if (tem_insn == i2)
			    i2 = NULL;

			  /* Delete the setter too.  */
			  if (cc0_setter)
			    {
			      PATTERN (cc0_setter) = pc_rtx;
			      old_notes = REG_NOTES (cc0_setter);
			      REG_NOTES (cc0_setter) = NULL;

			      distribute_notes (old_notes, cc0_setter,
						cc0_setter, NULL,
						NULL_RTX, NULL_RTX, NULL_RTX);
			      distribute_links (LOG_LINKS (cc0_setter));

			      SET_INSN_DELETED (cc0_setter);
			      if (cc0_setter == i2)
				i2 = NULL;
			    }
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem_insn, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem_insn;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
			   || (CALL_P (tem_insn)
			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
		    {
		      place = tem_insn;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  struct insn_link *links = LOG_LINKS (place);
			  LOG_LINKS (place) = NULL;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem_insn == BB_HEAD (bb))
		    break;
		}
	    }

	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not if it is totally replaced,
	     which is what `dead_or_set_p' checks, so also check for it being
	     set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = &reg_stat[regno];

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.]  */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;

	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */

	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
		{
		  unsigned int endregno = END_REGNO (XEXP (note, 0));
		  bool all_used = true;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, PATTERN (place))
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      {
			all_used = false;
			break;
		      }

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs (i, reg_raw_mode[i]))
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, PATTERN (place))
				   && ! find_regno_fusage (place, USE, i))
			    for (tem_insn = PREV_INSN (place); ;
				 tem_insn = PREV_INSN (tem_insn))
			      {
				if (!NONDEBUG_INSN_P (tem_insn))
				  {
				    if (tem_insn == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem_insn, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem_insn)))
				  {
				    add_reg_note (tem_insn, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;

	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;
	}

      if (place2)
	add_shallow_copy_of_reg_note (place2, note);
    }
}
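/* Editor's note (illustrative, not from the original source): in a
   typical 2->1 combination where I2's single SET is substituted into
   I3, a REG_DEAD note for I2's destination is simply dropped when
   that register was eliminated (ELIM_I2), while a REG_DEAD note for
   an input that now dies in I3 is moved onto I3 by the REG_DEAD case
   above.  */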
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
	continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
	set = pat;
      else if (GET_CODE (pat) == PARALLEL)
	{
	  int i;
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      set = XVECEXP (pat, 0, i);
	      if (GET_CODE (set) != SET)
		continue;

	      reg = SET_DEST (set);
	      while (GET_CODE (reg) == ZERO_EXTRACT
		     || GET_CODE (reg) == STRICT_LOW_PART
		     || GET_CODE (reg) == SUBREG)
		reg = XEXP (reg, 0);

	      if (!REG_P (reg))
		continue;

	      if (REGNO (reg) == link->regno)
		break;
	    }
	  if (i == XVECLEN (pat, 0))
	    continue;
	}
      else
	continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART
	     || GET_CODE (reg) == SUBREG)
	reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  struct insn_link *link2;

	  FOR_EACH_LOG_LINK (link2, place)
	    if (link2->insn == link->insn && link2->regno == link->regno)
	      break;

	  if (link2 == NULL)
	    {
	      link->next = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
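/* Editor's note (illustrative, not from the original source): after a
   combination changes which insn sets (reg:SI 100), the old LOG_LINK
   is re-homed by scanning forward from the setter to the first insn
   in the block that references the register, restoring the invariant
   that a link sits on the first use and points at the set.  */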
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
	  && !reg_mentioned_p (x, expr))
	return true;
    }
  return false;
}
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
	free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}