/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKS never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.

   There are no LOG_LINKS for uses of CC0.  They are not needed,
   because the insn that sets CC0 is always immediately before the
   insn that tests it.  So we always regard a branch insn as having
   a logical link to the preceding insn.  The same is true for an
   insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
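
/* As an illustrative sketch (the register numbers below are invented,
   not taken from any real dump): given the pair

	(set (reg 116) (ashift (reg 115) (const_int 2)))
	(set (reg 117) (plus (reg 116) (reg 118)))

   where reg 116 is used only in the second insn, combine substitutes
   the shift for reg 116 and asks the recognizer whether

	(set (reg 117) (plus (ashift (reg 115) (const_int 2)) (reg 118)))

   matches a pattern in the machine description (e.g. a shift-and-add
   insn); if it does, the first insn is deleted and the dataflow
   information is updated.  */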
80 #include "coretypes.h"
95 #include "stor-layout.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"

/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn			*last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn			*last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				other expression

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx				last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int				last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int				last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT	last_set_nonzero_bits;
  char				last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode)	last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char				last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char			sign_bit_copies;

  unsigned HOST_WIDE_INT	nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int				truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
};


static vec<reg_stat_type> reg_stat;
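
/* Hypothetical example (values invented for illustration): after combine
   has scanned

	(set (reg 120) (and (reg 121) (const_int 255)))

   reg_stat[120].nonzero_bits would be 0xff, so a later
   (and (reg 120) (const_int 255)) applied to reg 120 can be proven
   redundant, per the `and' example in the comment above.  */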

/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;


/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN) \
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
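
/* A typical use of FOR_EACH_LOG_LINK, sketched here for illustration
   (the variable names are arbitrary):

     struct insn_link *links;
     FOR_EACH_LOG_LINK (links, insn)
       if (links->insn == def_insn && links->regno == REGNO (dest))
	 break;

   i.e. walk the list of defs feeding INSN, looking for a particular one.  */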

/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static machine_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;

/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);

/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */

static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}

/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}

/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}

/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}

/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
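
/* Illustrative sketch of how the undo machinery is used (simplified from
   the real callers; `x' and `new_rtx' stand for whatever is being
   rewritten):

     SUBST (XEXP (x, 0), new_rtx);

   records the old operand in undobuf before overwriting it, so that
   undo_all can restore every such change if the rewritten insn fails
   recog_for_combine.  */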

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))

/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)

/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_rtx_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_rtx_costs.  */
  new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
  if (newi2pat)
    {
      new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old_cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}

/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}

/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}

/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}

/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}

/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_rtx_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
						optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subregs of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already have been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun, "insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}

/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}

/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}

/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
					   BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}

/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (mode);
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	}
    }
}
1781 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1782 optionally insns that were previously combined into I3 or that will be
1783 combined into the merger of INSN and I3. The order is PRED, PRED2,
1784 INSN, SUCC, SUCC2, I3.
1786 Return 0 if the combination is not allowed for any reason.
1788 If the combination is allowed, *PDEST will be set to the single
1789 destination of INSN and *PSRC to the single source, and this function
1793 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1794 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1795 rtx
*pdest
, rtx
*psrc
)
1802 bool all_adjacent
= true;
1803 int (*is_volatile_p
) (const_rtx
);
1809 if (next_active_insn (succ2
) != i3
)
1810 all_adjacent
= false;
1811 if (next_active_insn (succ
) != succ2
)
1812 all_adjacent
= false;
1814 else if (next_active_insn (succ
) != i3
)
1815 all_adjacent
= false;
1816 if (next_active_insn (insn
) != succ
)
1817 all_adjacent
= false;
1819 else if (next_active_insn (insn
) != i3
)
1820 all_adjacent
= false;
1822 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1823 or a PARALLEL consisting of such a SET and CLOBBERs.
1825 If INSN has CLOBBER parallel parts, ignore them for our processing.
1826 By definition, these happen during the execution of the insn. When it
1827 is merged with another insn, all bets are off. If they are, in fact,
1828 needed and aren't also supplied in I3, they may be added by
1829 recog_for_combine. Otherwise, it won't match.
1831 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1834 Get the source and destination of INSN. If more than one, can't
1837 if (GET_CODE (PATTERN (insn
)) == SET
)
1838 set
= PATTERN (insn
);
1839 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1840 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1842 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1844 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1846 switch (GET_CODE (elt
))
1848 /* This is important to combine floating point insns
1849 for the SH4 port. */
1851 /* Combining an isolated USE doesn't make sense.
1852 We depend here on combinable_i3pat to reject them. */
1853 /* The code below this loop only verifies that the inputs of
1854 the SET in INSN do not change. We call reg_set_between_p
1855 to verify that the REG in the USE does not change between
1857 If the USE in INSN was for a pseudo register, the matching
1858 insn pattern will likely match any register; combining this
1859 with any other USE would only be safe if we knew that the
1860 used registers have identical values, or if there was
1861 something to tell them apart, e.g. different modes. For
1862 now, we forgo such complicated tests and simply disallow
1863 combining of USES of pseudo registers with any other USE. */
1864 if (REG_P (XEXP (elt
, 0))
1865 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1867 rtx i3pat
= PATTERN (i3
);
1868 int i
= XVECLEN (i3pat
, 0) - 1;
1869 unsigned int regno
= REGNO (XEXP (elt
, 0));
1873 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1875 if (GET_CODE (i3elt
) == USE
1876 && REG_P (XEXP (i3elt
, 0))
1877 && (REGNO (XEXP (i3elt
, 0)) == regno
1878 ? reg_set_between_p (XEXP (elt
, 0),
1879 PREV_INSN (insn
), i3
)
1880 : regno
>= FIRST_PSEUDO_REGISTER
))
1887 /* We can ignore CLOBBERs. */
1892 /* Ignore SETs whose result isn't used but not those that
1893 have side-effects. */
1894 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1895 && insn_nothrow_p (insn
)
1896 && !side_effects_p (elt
))
1899 /* If we have already found a SET, this is a second one and
1900 so we cannot combine with this insn. */
1908 /* Anything else means we can't combine. */
1914 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1915 so don't do anything with it. */
1916 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1925 /* The simplification in expand_field_assignment may call back to
1926 get_last_value, so set safe guard here. */
1927 subst_low_luid
= DF_INSN_LUID (insn
);
1929 set
= expand_field_assignment (set
);
1930 src
= SET_SRC (set
), dest
= SET_DEST (set
);
  /* Do not eliminate a user-specified register if it is in an
     asm input, because we would break the register asm usage defined
     in the GCC manual if we allowed it.
     Be aware that this may cover more cases than we expect but this
     should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;

  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
	  && ((succ2
	       && (reg_used_between_p (dest, succ2, i3)
		   || reg_used_between_p (dest, succ, succ2)))
	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
	      || (succ
		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
		     that case SUCC is not in the insn stream, so use SUCC2
		     instead for this test.  */
		  && reg_used_between_p (dest, insn,
					 succ2
					 && INSN_UID (succ) == INSN_UID (succ2)
					 ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && use_crosses_set_p (src, DF_INSN_LUID (insn)))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.  */
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
	return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;
  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
	{
	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	  if (!REG_P (reg)
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
	      return 0;
	}
  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
	return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
	return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;
  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
		  ? volatile_refs_p
		  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;
  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (JUMP_P (i3)
	      || reg_used_between_p (XEXP (link, 0), insn, i3)
	      || (pred != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
	      || (pred2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
	      || (succ != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
	      || (succ2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
	return 0;
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
	  && ! all_adjacent)
	return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

	 (set (reg:DI 101) (reg:DI 100))
	 (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
		    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */

static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;
  if (GET_CODE (x) == SET)
    {
      rtx set = x;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
	     || GET_CODE (inner_dest) == SUBREG
	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
	inner_dest = XEXP (inner_dest, 0);

      /* Check for the case where I3 modifies its output, as discussed
	 above.  We don't want to prevent pseudos from being combined
	 into the address of a MEM, so only prevent the combination if
	 i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
	   (!MEM_P (inner_dest)
	    || rtx_equal_p (i2dest, inner_dest)
	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

	  /* This is the same test done in can_combine_p except we can't test
	     all_adjacent; we don't have to, since this instruction will stay
	     in place, thus we are not considering increasing the lifetime of
	     INNER_DEST.

	     Also, if this insn sets a function argument, combining it with
	     something that might need a spill could clobber a previous
	     function argument; the all_adjacent test in can_combine_p also
	     checks this; here, we do a more specific test for this case.  */

	  || (REG_P (inner_dest)
	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
					GET_MODE (inner_dest))))
	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
	return 0;

      /* If DEST is used in I3, it is being killed in this insn, so
	 record that for later.  We have to consider paradoxical
	 subregs here, since they kill the whole register, but we
	 ignore partial subregs, STRICT_LOW_PART, etc.
	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
	 STACK_POINTER_REGNUM, since these are always considered to be
	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (subdest))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
	subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
	  && REG_P (subdest)
	  && reg_referenced_p (subdest, PATTERN (i3))
	  && REGNO (subdest) != FRAME_POINTER_REGNUM
	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
	      || (REGNO (subdest) != ARG_POINTER_REGNUM
		  || ! fixed_regs[REGNO (subdest)]))
	  && REGNO (subdest) != STACK_POINTER_REGNUM)
	{
	  if (*pi3dest_killed)
	    return 0;

	  *pi3dest_killed = subdest;
	}
    }
  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
				i1_not_in_src, i0_not_in_src, pi3dest_killed))
	  return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   or division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
	return contains_muldiv (XEXP (x, 0))
	       || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
	return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
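/* For instance (a hypothetical illustration, not from the original
   sources): contains_muldiv returns 0 for
     (plus:SI (mult:SI (reg:SI 70) (const_int 4)) (reg:SI 71))
   because the multiplication is by a power of two and will become a
   shift, but returns 1 for
     (plus:SI (mult:SI (reg:SI 70) (const_int 3)) (reg:SI 71))
   and for anything containing a DIV, UDIV, MOD or UMOD.  */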
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (!NONDEBUG_INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
	  || (HARD_REGISTER_P (dest)
	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS
						 (REGNO (dest))))))
    return 1;

  return 0;
}
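/* A hypothetical illustration (not from the original sources): on a
   target whose function-value hard register belongs to a likely-spilled
   class, a copy such as (set (reg:SI 0) (reg:SI 70)) is rejected here
   so the allocator can tie the move, whereas a copy between two
   pseudos, (set (reg:SI 70) (reg:SI 71)), is never rejected by this
   test.  */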
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};
/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
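/* A worked example (hypothetical, not from the original sources):
   with info->regno == 0 and info->nregs == 4 the live mask starts as
   (2U << 3) - 1 == 0xf.  A store to (reg:SI 1) arrives here with
   regno == 1 and nregs == 1, so new_mask == (2U << 0) - 1 == 1,
   shifted left by regno - info->regno == 1 to give 0x2; clearing it
   leaves 0xd, i.e. only registers 0, 2 and 3 still possibly live.  */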
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs --;
  do
    {
      if ((mask & 1 << nregs)
	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
	return 1;
    } while (nregs--);
  return 0;
}
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (HARD_REGNO_MODE_OK (regno, mode)
	    && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
	  && REG_N_SETS (regno) == 1
	  && !added_sets
	  && !REG_USERVAR_P (x));
}
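/* For example (hypothetical, not from the original sources): a pseudo
   (reg:CC 100) that is set exactly once, is not a user variable, and
   whose original set is not kept (ADDED_SETS == 0) may be rewritten in
   a more specific mode such as one chosen by SELECT_CC_MODE; a hard
   register additionally needs HARD_REGNO_MODE_OK to accept the new
   mode without needing more hard registers than before.  */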
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
	 && SUBREG_REG (x) == reg
	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
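/* Hypothetical illustrations (not from the original sources): with
   REG == (reg:DI 100), the destinations
     (subreg:SI (reg:DI 100) 0)
     (strict_low_part (subreg:SI (reg:DI 100) 0))
   both satisfy reg_subword_p, while (reg:DI 100) itself, or a SUBREG
   of a different register, does not.  */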
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
	if (BARRIER_P (insn))
	  {
	    if (PREV_INSN (insn))
	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	    else
	      BB_FOOTER (bb) = NEXT_INSN (insn);
	    if (NEXT_INSN (insn))
	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	  }
	else if (LABEL_P (insn))
	  break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
	|| XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
      return false;

  return true;
}
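/* For N == 2 (a hypothetical illustration, not from the original
   sources), the pattern
     (parallel [(set (reg:SI 91) (plus:SI (reg:SI 92) (reg:SI 93)))
		(set (reg:SI 94) (reg:SI 92))
		(clobber (scratch:SI))])
   satisfies this predicate, whereas a PARALLEL whose second element is
   a CLOBBER, or whose SET_DEST is a MEM, does not.  */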
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
	return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
	  return false;
    }

  return true;
}
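/* A hypothetical counter-example (not from the original sources):
     (parallel [(set (reg:SI 90) (reg:SI 91))
		(set (reg:SI 92) (plus:SI (reg:SI 90) (const_int 1)))])
   cannot be split in order: inside the PARALLEL the second SET reads
   the old value of (reg:SI 90), but after splitting it would read the
   value just stored by the first SET.  */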
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that was
   passed as I3 to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets = 0;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;
  scalar_int_mode dest_mode, temp_mode;
  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;
  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
	return 0;

      for (i = 0; i < 4; i++)
	{
	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
	  rtx set = single_set (insn);
	  rtx src;
	  if (!set)
	    continue;
	  src = SET_SRC (set);
	  if (CONSTANT_P (src))
	    {
	      ngood += 2;
	      break;
	    }
	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
	    ngood++;
	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
	    nshift++;
	}

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
	 are likely manipulating its value.  Ideally we'll be able to combine
	 all four insns into a bitfield insertion of some kind.

	 Note the source in I0 might be inside a sign/zero extension and the
	 memory modes in I0 and I3 might be different.  So extract the address
	 from the destination of I3 and search for it in the source of I0.

	 In the event that there's a match but the source/dest do not actually
	 refer to the same memory, the worst that happens is we try some
	 combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
	  /* Ensure the source of SET0 is a MEM, possibly buried inside
	     an extension.  */
	  && (GET_CODE (SET_SRC (set0)) == MEM
	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
	  && (set3 = single_set (i3))
	  /* Ensure the destination of SET3 is a MEM.  */
	  && GET_CODE (SET_DEST (set3)) == MEM
	  /* Would it be better to extract the base address for the MEM
	     in SET3 and look for that?  I don't have cases where it matters
	     but I could envision such cases.  */
	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
	ngood += 2;

      if (ngood < 2 && nshift < 2)
	return 0;
    }
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;
  combine_attempts++;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
	fprintf (dump_file, "\nTrying %d -> %d:\n",
		 INSN_UID (i2), INSN_UID (i3));
    }
  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
	 below would need to check what is inside (and reg_overlap_mentioned_p
	 doesn't support those codes anyway).  Don't allow those destinations;
	 the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
				    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
	 which we are going to substitute into one output of I2,
	 is not used within another output of I2.  We must avoid making this:
	 (parallel [(set (mem (reg 69)) ...)
		    (set (reg 69) ...)])
	 which is not well-defined as to order of actions.
	 (Besides, reload can't handle output reloads for this.)

	 The problem can also happen if the dest of I3 is a memory ref,
	 if another dest in I2 is an indirect memory ref.

	 Neither can this PARALLEL be an asm.  We do not allow combining
	 that usually (see can_combine_p), so do not here either.  */
      bool ok = true;
      for (i = 0; ok && i < XVECLEN (p2, 0); i++)
	{
	  if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
	      && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
					  SET_DEST (XVECEXP (p2, 0, i))))
	    ok = false;
	  else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
		   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
	    ok = false;
	}

      if (ok)
	for (i = 0; i < XVECLEN (p2, 0); i++)
	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
	    {
	      combine_merges++;

	      subst_insn = i3;
	      subst_low_luid = DF_INSN_LUID (i2);

	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
	      i2src = SET_SRC (XVECEXP (p2, 0, i));
	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
	      i2dest_killed = dead_or_set_p (i2, i2dest);

	      /* Replace the dest in I2 with our dest and make the resulting
		 insn the new pattern for I3.  Then skip to where we validate
		 the pattern.  Everything was set up above.  */
	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
	      newpat = p2;
	      i3_subst_into_i2 = 1;
	      goto validate_replacement;
	    }
    }
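/* A hypothetical instance of the case above (not from the original
   sources):
     I2: (parallel [(set (reg:SI 90) (div:SI (reg:SI 88) (reg:SI 89)))
		    (set (reg:SI 91) (mod:SI (reg:SI 88) (reg:SI 89)))])
     I3: (set (mem:SI (reg:SI 80)) (reg:SI 90))
   with (reg:SI 90) dead after I3: the quotient destination in I2 is
   replaced by (mem:SI (reg:SI 80)), so the division computes directly
   into the structure slot and the copy insn disappears.  */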
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      rtx temp_dest = SET_DEST (temp_expr);
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  if (CONST_INT_P (XEXP (dest, 1))
	      && CONST_INT_P (XEXP (dest, 2))
	      && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
					 &dest_mode))
	    {
	      width = INTVAL (XEXP (dest, 1));
	      offset = INTVAL (XEXP (dest, 2));
	      dest = XEXP (dest, 0);
	      if (BITS_BIG_ENDIAN)
		offset = GET_MODE_PRECISION (dest_mode) - width - offset;
	    }
	}
      else
	{
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    dest = XEXP (dest, 0);
	  if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
	    {
	      width = GET_MODE_PRECISION (dest_mode);
	      offset = 0;
	    }
	}

      if (offset >= 0)
	{
	  /* If this is the low part, we're done.  */
	  if (subreg_lowpart_p (dest))
	    ;
	  /* Handle the case where inner is twice the size of outer.  */
	  else if (GET_MODE_PRECISION (temp_mode)
		   == 2 * GET_MODE_PRECISION (dest_mode))
	    offset += GET_MODE_PRECISION (dest_mode);
	  /* Otherwise give up for now.  */
	  else
	    offset = -1;
	}

      if (offset >= 0)
	{
	  rtx inner = SET_SRC (PATTERN (i3));
	  rtx outer = SET_SRC (temp_expr);

	  wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
				   rtx_mode_t (inner, dest_mode),
				   offset, width);

	  combine_merges++;
	  subst_insn = i3;
	  subst_low_luid = DF_INSN_LUID (i2);
	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
	  i2dest = temp_dest;
	  i2dest_killed = dead_or_set_p (i2, i2dest);

	  /* Replace the source in I2 with the new constant and make the
	     resulting insn the new pattern for I3.  Then skip to where we
	     validate the pattern.  Everything was set up above.  */
	  SUBST (SET_SRC (temp_expr),
		 immed_wide_int_const (o, temp_mode));

	  newpat = PATTERN (i2);

	  /* The dest of I3 has been replaced with the dest of I2.  */
	  changed_i3_dest = 1;
	  goto validate_replacement;
	}
    }
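/* A worked example (hypothetical, not from the original sources):
     I2: (set (reg:DI 100) (const_int 0))
     I3: (set (subreg:SI (reg:DI 100) 0) (const_int 42))
   reg_subword_p accepts I3's destination; on a little-endian target
   wi::insert places 42 into bits [0, 32) of the DImode zero, and the
   new pattern used for I3 becomes
     (set (reg:DI 100) (const_int 42)).  */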
  /* If we have no I1 and I2 looks like:
	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
		   (set Y OP)])
     make up a dummy I1 that is
	(set Y OP)
     and change I2 to be
	(set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
	  == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
	     SET_DEST (PATTERN (i1)));

      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }
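/* A hypothetical illustration (not from the original sources): if I2 is
     (parallel [(set (reg:CCZ 17)
		     (compare:CCZ (plus:SI (reg:SI 90) (const_int -1))
				  (const_int 0)))
		(set (reg:SI 90) (plus:SI (reg:SI 90) (const_int -1)))])
   the dummy I1 becomes the second SET and I2 is rewritten as
     (set (reg:CCZ 17) (compare:CCZ (reg:SI 90) (const_int 0)))
   which can later merge with a conditional branch into a
   branch-and-decrement insn.  */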
  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
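/* A hypothetical illustration (not from the original sources): an I2
   such as
     (parallel [(set (reg:SI 90) (reg:SI 92))
		(set (reg:SI 91) (reg:SI 93))])
   is handled by letting the fake I1 carry the first SET while I2 keeps
   the second, and any original I1 moves down to become I0.  */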
  /* Verify that I2 and I1 are valid for combining.  */
  if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
      || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
				 &i1dest, &i1src))
      || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
				 &i0dest, &i0src)))
    {
      undo_all ();
      return 0;
    }
3010 cases. Knowing this will help in register status updating below. */
3011 i2dest_in_i2src
= reg_overlap_mentioned_p (i2dest
, i2src
);
3012 i1dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i1dest
, i1src
);
3013 i2dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i2dest
, i1src
);
3014 i0dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i0dest
, i0src
);
3015 i1dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i1dest
, i0src
);
3016 i2dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i2dest
, i0src
);
3017 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
3018 i1dest_killed
= i1
&& dead_or_set_p (i1
, i1dest
);
3019 i0dest_killed
= i0
&& dead_or_set_p (i0
, i0dest
);
  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
			     && reg_overlap_mentioned_p (i0dest, i2src))));
  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
			  &i3dest_killed))
    {
      undo_all ();
      return 0;
    }
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;
  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like
	mov r3,(r3)+
     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
	&& REG_P (SET_SRC (PATTERN (i3)))
	&& MEM_P (SET_DEST (PATTERN (i3)))
	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
    if (AUTO_INC_DEC)
      {
	rtx link;
	for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
	  if (REG_NOTE_KIND (link) == REG_INC
	      && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
		  || (i1 != 0
		      && reg_overlap_mentioned_p (XEXP (link, 0),
						  PATTERN (i1)))))
	    {
	      undo_all ();
	      return 0;
	    }
      }
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
		     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
		     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			 && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;
  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }
  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
	i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
	i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
	i0pat = copy_rtx (PATTERN (i0));
    }
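/* A hypothetical illustration of why the copies matter (not from the
   original sources): if I1 is
     (set (reg:SI 90) (plus:SI (reg:SI 90) (const_int 4)))
   then I1DEST occurs in I1SRC, and substituting I1SRC for I1DEST into
   a shared, uncopied PATTERN (I2) could later substitute into the very
   rtx that was just inserted, producing self-referential RTL.  Each
   substitution must therefore start from a fresh copy.  */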
  combine_merges++;

  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;
  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
      scalar_int_mode mode;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
					    &cc_use_insn)))
	{
	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
	  if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
	    compare_code = simplify_compare_const (compare_code, mode,
						   op0, &op1);
	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
	}

      /* Do the rest only if op1 is const0_rtx, which may be the
	 result of simplification.  */
      if (op1 == const0_rtx)
	{
	  /* If a single use of the CC is found, prepare to modify it
	     when SELECT_CC_MODE returns a new CC-class mode, or when
	     the above simplify_compare_const() returned a new comparison
	     operator.  undobuf.other_insn is assigned the CC use insn
	     when modifying it.  */
	  if (cc_use_loc)
	    {
#ifdef SELECT_CC_MODE
	      machine_mode new_mode
		= SELECT_CC_MODE (compare_code, op0, op1);
	      if (new_mode != orig_compare_mode
		  && can_change_dest_mode (SET_DEST (newpat),
					   added_sets_2, new_mode))
		{
		  unsigned int regno = REGNO (newpat_dest);
		  compare_mode = new_mode;
		  if (regno < FIRST_PSEUDO_REGISTER)
		    newpat_dest = gen_rtx_REG (compare_mode, regno);
		  else
		    {
		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		      newpat_dest = regno_reg_rtx[regno];
		    }
		}
#endif
	      /* Cases for modifying the CC-using comparison.  */
	      if (compare_code != orig_compare_code
		  /* ??? Do we need to verify the zero rtx?  */
		  && XEXP (*cc_use_loc, 1) == const0_rtx)
		{
		  /* Replace cc_use_loc with entire new RTX.  */
		  SUBST (*cc_use_loc,
			 gen_rtx_fmt_ee (compare_code, compare_mode,
					 newpat_dest, const0_rtx));
		  undobuf.other_insn = cc_use_insn;
		}
	      else if (compare_mode != orig_compare_mode)
		{
		  /* Just replace the CC reg with a new mode.  */
		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
		  undobuf.other_insn = cc_use_insn;
		}
	    }

	  /* Now we modify the current newpat:
	     First, SET_DEST(newpat) is updated if the CC mode has been
	     altered.  For targets without SELECT_CC_MODE, this should be
	     optimized away.  */
	  if (compare_mode != orig_compare_mode)
	    SUBST (SET_DEST (newpat), newpat_dest);
	  /* This is always done to propagate i2src into newpat.  */
	  SUBST (SET_SRC (newpat),
		 gen_rtx_COMPARE (compare_mode, op0, op1));
	  /* Create new version of i2pat if needed; the below PARALLEL
	     creation needs this to work correctly.  */
	  if (! rtx_equal_p (i2src, op0))
	    i2pat = gen_rtx_SET (i2dest, op0);
	  i2_is_used = 1;
	}
    }
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
	 an unneeded operation, such as a ZERO_EXTEND of something
	 that is known to have the high part zero.  Handle that case
	 by letting subst look at the inner insns.

	 Another way to do this would be to have a function that tries
	 to simplify a single insn instead of merging two or more
	 insns.  We don't do this because of the potential of infinite
	 loops and because of the potential extra memory required.
	 However, doing it the way we are is a bit of a kludge and
	 doesn't catch all cases.

	 But only do this if -fexpensive-optimizations since it slows
	 things down and doesn't usually win.

	 This is not done in the COMPARE case above because the
	 unmodified I2PAT is used in the PARALLEL and so a pattern
	 with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
	{
	  /* Pass pc_rtx so no substitutions are done, just
	     simplifications.  */
	  if (i1)
	    {
	      subst_low_luid = DF_INSN_LUID (i1);
	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
	    }

	  subst_low_luid = DF_INSN_LUID (i2);
	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
	}

      n_occurrences = 0;		/* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);

      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
	 copy of I2SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I1SRC for I1DEST
	 later.  Likewise if I0 feeds into I2, either directly or indirectly
	 through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
		      (i1_feeds_i2_n && i1dest_in_i1src)
		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			  && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Check that an autoincrement side-effect on I1 has not been lost.
	 This happens if I1DEST is mentioned in I2 and dies there, and
	 has disappeared from the new pattern.  */
      if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	   && i1_feeds_i2_n
	   && dead_or_set_p (i2, i1dest)
	   && !reg_overlap_mentioned_p (i1dest, newpat))
	  /* Before we can do this substitution, we must redo the test done
	     above (see detailed comments there) that ensures I1DEST isn't
	     mentioned in any SETs in NEWPAT that are field assignments.  */
	  || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
	 for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
	i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
	 copy of I1SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I0SRC for I0DEST
	 later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
		      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	   && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
	       || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
	   && !reg_overlap_mentioned_p (i0dest, newpat))
	  || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      /* If the following substitution will modify I0SRC, make a copy of it
	 for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
	i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
			   || (i0_feeds_i2_n)))
	i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
	      > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	  && (n_occurrences + added_sets_0
	      + (added_sets_1 && i0_feeds_i1_n)
	      + (added_sets_2 && i0_feeds_i2_n)
	      > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
	 at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
	  && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
      int total_sets;

      if (GET_CODE (newpat) == PARALLEL)
	{
	  rtvec old = XVEC (newpat, 0);
	  total_sets = XVECLEN (newpat, 0) + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
		  sizeof (old->elem[0]) * old->num_elem);
	}
      else
	{
	  rtx old = newpat;
	  total_sets = 1 + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  XVECEXP (newpat, 0, 0) = old;
	}

      if (added_sets_0)
	XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
	{
	  rtx t = i1pat;
	  if (i0_feeds_i1_n)
	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
      if (added_sets_2)
	{
	  rtx t = i2pat;
	  if (i1_feeds_i2_n)
	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
		       i0_feeds_i1_n && i0dest_in_i0src);
	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
    }
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }
  /* We have recognized nothing yet.  */
  insn_code_number = -1;

  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
	   || (GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
	  && insn_nothrow_p (i3)
	  && !side_effects_p (SET_SRC (set1)))
	{
	  newpat = set0;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}

      else if (((REG_P (SET_DEST (set0))
		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
		|| (GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i3, REG_UNUSED,
				      SUBREG_REG (SET_DEST (set0)))))
	       && insn_nothrow_p (i3)
	       && !side_effects_p (SET_SRC (set0)))
	{
	  newpat = set1;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    changed_i3_dest = 1;
	}

      if (insn_code_number < 0)
	newpat = oldpat;
    }
  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, *split;
      rtx_insn *m_split_insn;

      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
	 use I2DEST as a scratch register will help.  In the latter case,
	 convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split_insn = combine_split_insns (newpat, i3);
      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
	     (temporarily, until we are committed to this instruction
	     combination) does not work: for example, any call to nonzero_bits
	     on the register (from a splitter in the MD file, for example)
	     will get the old information, which is invalid.

	     Since nowadays we can create registers during combine just fine,
	     we should just create a new one here, not reuse i2dest.  */

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split_insn = combine_split_insns (parallel, i3);

	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split_insn == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split_insn = combine_split_insns (parallel, i3);

	      if (m_split_insn == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }

	  i2scratch = m_split_insn != 0;
	}
      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split_insn = combine_split_insns (parallel, i3);
	}

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	{
	  rtx m_split_pat = PATTERN (m_split_insn);
	  insn_code_number = recog_for_combine (&m_split_pat, i3,
						&new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split_pat;
	}
      else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
	       && (next_nonnote_nondebug_insn (i2) == i3
		   || ! use_crosses_set_p (PATTERN (m_split_insn),
					   DF_INSN_LUID (i2))))
	{
	  rtx i2set, i3set;
	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
	  newi2pat = PATTERN (m_split_insn);

	  i3set = single_set (NEXT_INSN (m_split_insn));
	  i2set = single_set (m_split_insn);

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* If I2 or I3 has multiple SETs, we won't know how to track
	     register status, so don't use these insns.  If I2's destination
	     is used between I2 and I3, we also can't use these insns.  */

	  if (i2_code_number >= 0 && i2set && i3set
	      && (next_nonnote_nondebug_insn (i2) == i3
		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
	    insn_code_number = recog_for_combine (&newi3pat, i3,
						  &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = newi3pat;

	  /* It is possible that both insns now set the destination of I3.
	     If so, we must show an extra use of it.  */

	  if (insn_code_number >= 0)
	    {
	      rtx new_i3_dest = SET_DEST (i3set);
	      rtx new_i2_dest = SET_DEST (i2set);

	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i3_dest) == SUBREG)
		new_i3_dest = XEXP (new_i3_dest, 0);

	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i2_dest) == SUBREG)
		new_i2_dest = XEXP (new_i2_dest, 0);

	      if (REG_P (new_i3_dest)
		  && REG_P (new_i2_dest)
		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
		  && REGNO (new_i2_dest) < reg_n_sets_max)
		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
	    }
	}
      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  && (!HAVE_cc0 || REG_P (i2dest))
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  i2scratch = true;

	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_INSNS)
	    i2src = copy_rtx (i2src);
	  else
	    i2src = NULL;

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }

	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0),
					     GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }
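	  /* For example (illustrative only): (mult (reg:SI 1) (const_int 8))
	     is rewritten here as (ashift (reg:SI 1) (const_int 3)), since
	     exact_log2 (8) == 3.  */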
	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
	  if (split_code == PLUS
	      && GET_CODE (XEXP (*split, 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
	    {
	      rtx nsplit = XEXP (*split, 0);
	      SUBST (XEXP (*split, 0),
		     gen_rtx_ASHIFT (GET_MODE (nsplit),
				     XEXP (nsplit, 0), GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif
	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
		      || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y)" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }
	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}
    }
  /* Check for a case where we loaded from memory in a narrow mode and
     then sign extended it, but we need both registers.  In that case,
     we have a PARALLEL with both loads from the same memory location.
     We can split this into a load from memory followed by a
     register-register copy.  This saves at least one insn, more if
     register allocation can eliminate the copy.

     We cannot do this if the destination of the first assignment is a
     condition code register or cc0.  We eliminate this case by making sure
     the SET_DEST and SET_SRC have the same mode.

     We cannot do this if the destination of the second assignment is
     a register that we have already assumed is zero-extended.  Similarly
     for a SUBREG of such a register.  */

  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				   DF_INSN_LUID (i2))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		 (REG_P (temp_expr)
		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
		      != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))
    {
      rtx ni2dest;

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
	swap_i2i3 = 1;
    }
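  /* Illustrative sketch of the case above, with hypothetical regs:

	 (parallel [(set (reg:SI 1) (sign_extend:SI (mem:QI A)))
		    (set (reg:QI 2) (mem:QI A))])

     becomes I2: (set (reg:SI 1) (sign_extend:SI (mem:QI A))) and
     I3: (set (reg:QI 2) (lowpart of (reg:SI 1))), replacing the second
     memory load with a register-register copy.  */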
  /* Similarly, check for a case where we have a PARALLEL of two independent
     SETs but we started with three insns.  In this case, we can do the sets
     as two separate insns.  This case occurs when some SET allows two
     other insns to combine, but the destination of that SET is still live.

     Also do this if we started with two insns and (at least) one of the
     resulting sets is a noop; this noop will be deleted later.  */

  else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
		  || set_noop_p (XVECEXP (newpat, 0, 1)))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);

      /* Normally, it doesn't matter which of the two is done first,
	 but the one that references cc0 can't be the second, and
	 one which uses any regs/memory set in between i2 and i3 can't
	 be first.  The PARALLEL might also have been pre-existing in i3,
	 so we need to make sure that we won't wrongly hoist a SET to i2
	 that would conflict with a death note present in there.  */
      if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
	  && !(REG_P (SET_DEST (set1))
	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i2, REG_DEAD,
				 SUBREG_REG (SET_DEST (set1))))
	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	  /* If I3 is a jump, ensure that set0 is a jump so that
	     we do not create invalid RTL.  */
	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	{
	  newi2pat = set1;
	  newpat = set0;
	}
      else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
	       && !(REG_P (SET_DEST (set0))
		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i2, REG_DEAD,
				      SUBREG_REG (SET_DEST (set0))))
	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
	       /* If I3 is a jump, ensure that set1 is a jump so that
		  we do not create invalid RTL.  */
	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	{
	  newi2pat = set0;
	  newpat = set1;
	}
      else
	{
	  undo_all ();
	  return 0;
	}
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	{
	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    {
	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		  {
		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		    if (reg_overlap_mentioned_p (reg, newpat))
		      {
			undo_all ();
			return 0;
		      }
		  }
	    }

	  insn_code_number = recog_for_combine (&newpat, i3,
						&new_i3_notes);
	}
    }
  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }

  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat,
					     undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  if (HAVE_cc0)
    {
      rtx_insn *p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))
	{
	  undo_all ();
	  return 0;
	}
    }
  /* Only allow this combination if insn_rtx_costs reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }

  if (MAY_HAVE_DEBUG_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    gcc_assert (last);
		    if (DF_INSN_LUID (last)
			< DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }

  /* If we will be able to accept this, we have made a change to the
     destination of I3.  This requires us to do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }
  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn)))
	      /* Simply drop equal note since it may be no longer valid
		 for other_insn.  It may be possible to record that CC
		 register is changed and only discard those notes, but
		 in practice it's unnecessary complication and doesn't
		 give any meaningful improvement.

		 See PR78559.  */
	      || REG_NOTE_KIND (note) == REG_EQUAL
	      || REG_NOTE_KIND (note) == REG_EQUIV)
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }

  if (swap_i2i3)
    {
      rtx_insn *insn;
      struct insn_link *link;
      rtx ni2dest;

      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);

      /* We need a LOG_LINK from I3 to I2.  But we used to have one,
	 so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We must remove this link.
	 The simplest way to remove the link is to point it at I1,
	 which we know will be a NOTE.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      if (GET_CODE (newi2pat) == PARALLEL)
	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
      else
	ni2dest = SET_DEST (newi2pat);

      for (insn = NEXT_INSN (i3);
	   insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		    || insn != BB_HEAD (this_basic_block->next_bb));
	   insn = NEXT_INSN (insn))
	if (NONDEBUG_INSN_P (insn)
	    && reg_referenced_p (ni2dest, PATTERN (insn)))
	  {
	    FOR_EACH_LOG_LINK (link, insn)
	      if (link->insn == i3)
		link->insn = i1;

	    break;
	  }
    }
  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;
    /* Compute which registers we expect to eliminate.  newi2pat may be setting
       either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing REG_DEAD
       note for i2 and i3, in which case it does matter if newi2pat sets
       i1dest or not.

       Local information is used when distributing REG_DEAD note for i1,
       in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151, if we have four insns combination:

	   i0: r0 <- i0src
	   i1: r1 <- i1src (using r0)
		     REG_DEAD (r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)
		     REG_DEAD (r0)

       From i1's point of view, r0 is eliminated, no matter whether it is
       set by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
       should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);

    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything that is
       shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));
    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;

    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
	     link = XEXP (link, 1))
	  {
	    if (substed_i2)
	      {
		/* I2SRC must still be meaningful at this point.  Some
		   splitting operations can invalidate I2SRC, but those
		   operations do not apply to calls.  */
		gcc_assert (i2src);
		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						       i2dest, i2src);
	      }
	    if (substed_i1)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i1dest, i1src);
	    if (substed_i0)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i0dest, i0src);
	  }
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;

    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */
    if (i3_subst_into_i2)
      {
	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = 0;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;

    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }

    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }
    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);

    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
       in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);
    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, things
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }

    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }

    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }
    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }

  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }
  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  if (GET_CODE (PATTERN (i3)) == TRAP_IF
      && XEXP (PATTERN (i3), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (i3);

      remove_edge (split_block (bb, i3));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  if (undobuf.other_insn
      && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
      && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);

      remove_edge (split_block (bb, undobuf.other_insn));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  combine_successes++;
  undo_commit ();

  if (added_links_insn
      && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
      && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
    return added_links_insn;
  else
    return newi2pat ? i2 : i3;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}
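/* Illustrative usage sketch (an assumption about how a caller might use
   the marker pair, not a specific call site): a tentative change made
   through SUBST can be rolled back on failure:

       void *marker = get_undo_marker ();
       SUBST (XEXP (x, 0), new_rtx);
       if (!acceptable)
	 undo_to_marker (marker);

   where `acceptable' stands for whatever validity check the caller
   performs between the two calls.  */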
/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;
  scalar_int_mode mode, inner_mode;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
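      /* For example (illustrative only): (mem (symbol_ref "foo")) is
	 rewritten above as
	 (mem (lo_sum (high (symbol_ref "foo")) (symbol_ref "foo"))),
	 and the HIGH operand of the LO_SUM becomes the split point.  */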
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */

	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;

    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
				     &inner_mode)
	  && HWI_COMPUTABLE_MODE_P (inner_mode)
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (inner_mode))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  unsigned HOST_WIDE_INT mask
	    = (HOST_WIDE_INT_1U << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, inner_mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, inner_mode,
					  simplify_gen_binary (AND, inner_mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
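      /* A worked example (illustrative only): storing the constant 3 into
	 a 2-bit field at bit 4 of DEST, i.e. len == 2, pos == 4, src == 3,
	 gives mask == 3 and or_mask == 0x30; since src == mask the store
	 becomes (ior DEST (const_int 0x30)).  For src == 2 it would be
	 (ior (and DEST (const_int ~0x30)) (const_int 0x20)).  */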
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;
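	  /* For example (illustrative only): if (set (reg:SI D)
	     (and:SI (reg:SI S) (const_int 256))) feeds only a comparison
	     (eq (reg:SI D) (const_int 0)), then pos == exact_log2 (256)
	     == 8 and the AND is replaced by a single-bit ZERO_EXTRACT of
	     bit 8, avoiding the large constant.  */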
	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && 1 <= (pos = exact_log2
		       (nonzero_bits (XEXP (SET_SRC (x), 0),
				      GET_MODE (XEXP (SET_SRC (x), 0))))))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));

	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    GEN_INT (pos))));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;
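	  /* For example (illustrative only): if only bit 7 of X can be
	     nonzero, (ne X (const_int 0)) becomes
	     (neg (lshiftrt X (const_int 7))), which yields 0 or -1,
	     matching STORE_FLAG_VALUE == -1.  */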
	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (!is_int_mode (GET_MODE (inner), &inner_mode)
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (inner_mode);
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
				      &inner_mode)
	      && CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (inner_mode) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len && pos >= 0
	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
	  && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
	{
	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= (HOST_WIDE_INT_1U << len) - 1;
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner),
				   GEN_INT (pos)),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      GEN_INT (GET_MODE_PRECISION (mode)
					       - len - pos)),
		      GEN_INT (GET_MODE_PRECISION (mode) - len)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
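      /* For example (illustrative only): an unsigned 4-bit field at
	 bit 8 is rewritten as
	 (and (lshiftrt X (const_int 8)) (const_int 15)),
	 while a wider or signed field uses the two-shift form
	 (ashiftrt (ashift X ...) ...) instead, since its AND mask might
	 not fit in a single insn.  */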
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;
    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;
    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}
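      /* For example (illustrative only): (minus A (mult B (const_int 5)))
	 is rewritten as (plus (mult B (const_int -5)) A), since 5 is not
	 a power of two and the plus-of-mult form may match a
	 multiply-add pattern where a multiply-subtract does not.  */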
      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

/* Two expressions are equal if they are identical copies of a shared
   RTX or if they are both registers with the same register number
   and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;

  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && ! MODES_TIEABLE_P (GET_MODE (to),
					    GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && MODES_TIEABLE_P (GET_MODE (x),
						GET_MODE (SUBREG_REG (to))))
		      && (!HAVE_cc0
			  || (! (code == SET
				 && i == 1
				 && XEXP (x, 0) == cc0_rtx))))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG
					|| code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new_rtx,
						GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
	return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */

static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
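/* For example (illustrative only): (plus (const_int 4) (reg:SI 1)) is
   rewritten as (plus (reg:SI 1) (const_int 4)), restoring the canonical
   operand order.  Doing the swap via two SUBSTs records the change in
   undobuf, so it can be rolled back if the combination fails.  */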
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
   of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  scalar_int_mode int_mode;
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  maybe_swap_commutative_operands (x);

  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }

  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx
= simplify_relational_operation (code
, mode
, cmp_mode
,
5608 XEXP (x
, 0), XEXP (x
, 1));
5611 case RTX_COMM_ARITH
:
5613 temp
= simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
5615 case RTX_BITFIELD_OPS
:
5617 temp
= simplify_ternary_operation (code
, mode
, op0_mode
, XEXP (x
, 0),
5618 XEXP (x
, 1), XEXP (x
, 2));
5627 code
= GET_CODE (temp
);
5628 op0_mode
= VOIDmode
;
5629 mode
= GET_MODE (temp
);
  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  && cond != x
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }
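  /* As an illustration of the block above, an expression such as
     (plus (if_then_else (eq A 0) B C) D) can be rewritten as
     (if_then_else (eq A 0) (plus B D) (plus C D)) when the new arms
     simplify to something cheaper.  */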
  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }

  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
	{
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);
	  rtx inner;

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	    std::swap (inner_op0, inner_op1);
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	    {
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1));
	    }

	  if (inner)
	    return simplify_gen_binary (code, mode, other, inner);
	}
    }
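  /* For example, in (and (and (reg:SI 60) (const_int 7)) (const_int 12))
     the two constants associate, and the whole expression simplifies to
     (and (reg:SI 60) (const_int 4)).  */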
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;

    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
	     /* Don't call gen_lowpart if the inner mode
		is VOIDmode and we cannot simplify it, as SUBREG without
		inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;
      {
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;

	/* If op is known to have all lower bits zero, the result is zero.  */
	scalar_int_mode int_mode, int_op0_mode;
	if (!in_dest
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && (subreg_lowpart_offset (int_mode, int_op0_mode)
		== SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
	    && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
		& GET_MODE_MASK (int_mode)) == 0)
	  return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, int_mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
				   GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
      break;
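    /* For example, in SImode the first rule above turns
       (neg:SI (ashiftrt:SI X (const_int 31))) into
       (lshiftrt:SI X (const_int 31)).  */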
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (HWI_COMPUTABLE_MODE_P (mode))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      break;
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;
    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (int_mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
		      == (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 gen_rtx_XOR (int_mode, XEXP (x, 0),
					      const1_rtx),
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);

      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
	    return temp;
	}

      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
	  && !side_effects_p (XEXP (x, 0)))
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;
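    /* The x + x rule above thus turns (plus:SI (reg:SI 60) (reg:SI 60))
       into (ashift:SI (reg:SI 60) (const_int 1)).  */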
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;
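    /* For example, (minus:SI A (and:SI A (const_int -8))) becomes
       (and:SI A (const_int 7)): subtracting the bits above the low three
       leaves exactly those low three bits.  */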
    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
	}
      break;
    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
				     XEXP (x, 0), i);
      break;
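    /* For example, (udiv:SI X (const_int 8)) with X an LSHIFTRT is handled
       as a logical right shift of X by 3; simplify_shift_const then merges
       the two logical right shifts into one.  */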
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */
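	  /* For example, when STORE_FLAG_VALUE is 1 and X is known to be
	     either 0 or 1, (ne X (const_int 0)) simplifies to X itself
	     below, and (eq X (const_int 0)) simplifies to
	     (xor X (const_int 1)).  */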
	  if (in_cond)
	    ;

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    return gen_lowpart (int_mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, int_mode,
					  gen_lowpart (int_mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    return gen_lowpart (int_mode, expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
	    }

	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  else if (new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && HWI_COMPUTABLE_MODE_P (int_mode)
		   && val_signbit_p (int_mode, STORE_FLAG_VALUE)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (int_mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }

	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;
    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      (HOST_WIDE_INT_1U
			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
			      - 1, 0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode, inner_mode;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  std::swap (true_rtx, false_rtx);
	}

      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
	{
	  /* If we are comparing against zero and the expression being
	     tested has only a single bit that might be nonzero, that is
	     its value when it is not equal to zero.  Similarly if it is
	     known to be -1 or 0.  */
	  if (true_code == EQ
	      && true_val == const0_rtx
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
	    {
	      false_code = EQ;
	      false_val = gen_int_mode (nzb, from_mode);
	    }
	  else if (true_code == EQ
		   && true_val == const0_rtx
		   && (num_sign_bit_copies (from, from_mode)
		       == GET_MODE_PRECISION (from_mode)))
	    {
	      false_code = EQ;
	      false_val = constm1_rtx;
	    }
	}

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }

  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;

  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }

  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
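  /* For example, (if_then_else (ge A B) A B) is recognized above as
     (smax A B), and (if_then_else (ltu A B) A B) as (umin A B).  */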
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      machine_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}

      if (z)
	{
	  temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (MULT, m, temp,
				      simplify_gen_binary (MULT, m, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);

	  return temp;
	}
    }
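  /* For example, with STORE_FLAG_VALUE == 1, the transformation above
     rewrites (if_then_else (ne A 0) (plus Z (const_int 4)) Z) as
     (plus Z (mult (ne A 0) (const_int 4))).  */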
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
			    gen_lowpart (int_mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
      && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
	 == nonzero_bits (XEXP (cond, 0), inner_mode)
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      if (inner_mode == int_mode)
	return val;
      else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
	return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
    }

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }

  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	{
	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
	    {
	      inner_compare = op0;
	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
	    }
	}
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}
      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else if (inner_compare
	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
	       && new_code == old_code
	       && op0 == XEXP (inner_compare, 0)
	       && op1 == XEXP (inner_compare, 1))
	compare_mode = GET_MODE (inner_compare);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is the only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif  /* SELECT_CC_MODE */

      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, if we didn't previously have the same COMPARE we
	 want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }

  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machine where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
	   / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
					 GET_MODE (SUBREG_REG (src)),
					 GET_MODE (src)))
#endif
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }

  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && (GET_MODE_PRECISION (GET_MODE (src))
	  < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
				     nonzero_bits (inner, inner_mode)))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }

  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
	  || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
	  == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, int_mode,
				  simplify_gen_binary (IOR, int_mode,
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
	  && CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */

static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* fall through */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded.  If not for that, MEM's would very rarely be safe.

	 Reject modes bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
	return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
	  || !CONST_INT_P (XEXP (x, 2)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }

  /* We've rejected non-scalar operations by now.  */
  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (mode)
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
	  == 0))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
	return temp;
      else
	return x;
    }

  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when MODE is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));
    }

  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */
7237 to produce such a position. */
7239 modewidth
= GET_MODE_PRECISION (mode
);
7240 if (modewidth
>= pos
+ len
)
7242 tem
= gen_lowpart (mode
, XEXP (x
, 0));
7243 if (!tem
|| GET_CODE (tem
) == CLOBBER
)
7245 tem
= simplify_shift_const (NULL_RTX
, ASHIFT
, mode
,
7246 tem
, modewidth
- pos
- len
);
7247 tem
= simplify_shift_const (NULL_RTX
, unsignedp
? LSHIFTRT
: ASHIFTRT
,
7248 mode
, tem
, modewidth
- len
);
7250 else if (unsignedp
&& len
< HOST_BITS_PER_WIDE_INT
)
7251 tem
= simplify_and_const_int (NULL_RTX
, mode
,
7252 simplify_shift_const (NULL_RTX
, LSHIFTRT
,
7255 (HOST_WIDE_INT_1U
<< len
) - 1);
7257 /* Any other cases we can't handle. */
7260 /* If we couldn't do this for some reason, return the original
7262 if (GET_CODE (tem
) == CLOBBER
)
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */

static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
	{
	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
	  len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
	}
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
	{
	  inner = XEXP (SET_DEST (x), 0);
	  len = INTVAL (XEXP (SET_DEST (x), 1));
	  pos = XEXP (SET_DEST (x), 2);

	  /* A constant position should stay within the width of INNER.  */
	  if (CONST_INT_P (pos)
	      && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
	    break;

	  if (BITS_BIG_ENDIAN)
	    {
	      if (CONST_INT_P (pos))
		pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
			       - INTVAL (pos));
	      else if (GET_CODE (pos) == MINUS
		       && CONST_INT_P (XEXP (pos, 1))
		       && (INTVAL (XEXP (pos, 1))
			   == GET_MODE_PRECISION (GET_MODE (inner)) - len))
		/* If position is ADJUST - X, new position is X.  */
		pos = XEXP (pos, 0);
	      else
		{
		  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
		  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					     gen_int_mode (prec - len,
							   GET_MODE (pos)),
					     pos);
		}
	    }
	}

      /* A SUBREG between two modes that occupy the same numbers of words
	 can be done by moving the SUBREG to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
	       /* We need SUBREGs to compute nonzero_bits properly.  */
	       && nonzero_sign_valid
	       && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
		     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
			+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	{
	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
			   gen_lowpart
			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
			    SET_SRC (x)));
	  continue;
	}
      else
	break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
	inner = SUBREG_REG (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
	{
	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (GET_MODE (inner)))
	    break;

	  /* Try to find an integral mode to pun with.  */
	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
	      .exists (&compute_mode))
	    break;

	  inner = gen_lowpart (compute_mode, inner);
	}

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
	break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
	break;

      /* Now compute the equivalent expression.  Make a copy of INNER
	 for the SET_DEST in case it is a MEM into which we will substitute;
	 we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
			   compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (
				      AND, compute_mode,
				      gen_lowpart (compute_mode, SET_SRC (x)),
				      mask),
				    pos);
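      /* For example, storing SET_SRC into an 8-bit field at bit 8 of INNER
	 produces
	 (set INNER
	      (ior (and INNER (not (ashift (const_int 255) (const_int 8))))
		   (ashift (and SET_SRC (const_int 255)) (const_int 8)))),
	 modulo any further simplification of the subexpressions.  */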
      x = gen_rtx_SET (copy_rtx (inner),
		       simplify_gen_binary (IOR, compute_mode,
					    cleared, masked));
    }

  return x;
}
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */

static rtx
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
		 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  machine_mode wanted_inner_mode;
  machine_mode wanted_inner_reg_mode = word_mode;
  machine_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;

  if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
	 consider just the QI as the memory to extract from.
	 The subreg adds or removes high bits; its mode is
	 irrelevant to the meaning of this extraction,
	 since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
	is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
	   && CONST_INT_P (XEXP (inner, 1))
	   && pos_rtx == 0 && pos == 0
	   && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
	 (ashift X (const_int C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
				 0, 0, len - INTVAL (XEXP (inner, 1)),
				 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE)
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode.  For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && !paradoxical_subreg_p (tmode, inner_mode)
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0),
						  MEM_ADDR_SPACE (inner))
		      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
	 field.  If the original and current mode are the same, we need not
7506 field. If the original and current mode are the same, we need not
7507 adjust the offset. Otherwise, we do if bytes big endian.
7509 If INNER is not a MEM, get a piece consisting of just the field
7510 of interest (in this case POS % BITS_PER_WORD must be 0). */
7514 HOST_WIDE_INT offset
;
7516 /* POS counts from lsb, but make OFFSET count in memory order. */
7517 if (BYTES_BIG_ENDIAN
)
7518 offset
= (GET_MODE_PRECISION (is_mode
) - len
- pos
) / BITS_PER_UNIT
;
7520 offset
= pos
/ BITS_PER_UNIT
;
7522 new_rtx
= adjust_address_nv (inner
, tmode
, offset
);
7524 else if (REG_P (inner
))
7526 if (tmode
!= inner_mode
)
7528 /* We can't call gen_lowpart in a DEST since we
7529 always want a SUBREG (see below) and it would sometimes
7530 return a new hard register. */
7534 = subreg_offset_from_lsb (tmode
, inner_mode
, pos
);
7536 /* Avoid creating invalid subregs, for example when
7537 simplifying (x>>32)&255. */
7538 if (!validate_subreg (tmode
, inner_mode
, inner
, offset
))
7541 new_rtx
= gen_rtx_SUBREG (tmode
, inner
, offset
);
7544 new_rtx
= gen_lowpart (tmode
, inner
);
7550 new_rtx
= force_to_mode (inner
, tmode
,
7551 len
>= HOST_BITS_PER_WIDE_INT
7553 : (HOST_WIDE_INT_1U
<< len
) - 1, 0);
7555 /* If this extraction is going into the destination of a SET,
7556 make a STRICT_LOW_PART unless we made a MEM. */
7559 return (MEM_P (new_rtx
) ? new_rtx
7560 : (GET_CODE (new_rtx
) != SUBREG
7561 ? gen_rtx_CLOBBER (tmode
, const0_rtx
)
7562 : gen_rtx_STRICT_LOW_PART (VOIDmode
, new_rtx
)));
7567 if (CONST_SCALAR_INT_P (new_rtx
))
7568 return simplify_unary_operation (unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
,
7569 mode
, new_rtx
, tmode
);
7571 /* If we know that no extraneous bits are set, and that the high
7572 bit is not set, convert the extraction to the cheaper of
7573 sign and zero extension, that are equivalent in these cases. */
7574 if (flag_expensive_optimizations
7575 && (HWI_COMPUTABLE_MODE_P (tmode
)
7576 && ((nonzero_bits (new_rtx
, tmode
)
7577 & ~(((unsigned HOST_WIDE_INT
)GET_MODE_MASK (tmode
)) >> 1))
7580 rtx temp
= gen_rtx_ZERO_EXTEND (mode
, new_rtx
);
7581 rtx temp1
= gen_rtx_SIGN_EXTEND (mode
, new_rtx
);
7583 /* Prefer ZERO_EXTENSION, since it gives more information to
7585 if (set_src_cost (temp
, mode
, optimize_this_for_speed_p
)
7586 <= set_src_cost (temp1
, mode
, optimize_this_for_speed_p
))
7591 /* Otherwise, sign- or zero-extend unless we already are in the
7594 return (gen_rtx_fmt_e (unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
,
7598 /* Unless this is a COMPARE or we have a funny memory reference,
7599 don't do anything with zero-extending field extracts starting at
7600 the low-order bit since they are simple AND operations. */
7601 if (pos_rtx
== 0 && pos
== 0 && ! in_dest
7602 && ! in_compare
&& unsignedp
)
7605 /* Unless INNER is not MEM, reject this if we would be spanning bytes or
7606 if the position is not a constant and the length is not 1. In all
7607 other cases, we would only be going outside our object in cases when
7608 an original shift would have been undefined. */
7610 && ((pos_rtx
== 0 && pos
+ len
> GET_MODE_PRECISION (is_mode
))
7611 || (pos_rtx
!= 0 && len
!= 1)))
7614 enum extraction_pattern pattern
= (in_dest
? EP_insv
7615 : unsignedp
? EP_extzv
: EP_extv
);
7617 /* If INNER is not from memory, we want it to have the mode of a register
7618 extraction pattern's structure operand, or word_mode if there is no
7619 such pattern. The same applies to extraction_mode and pos_mode
7620 and their respective operands.
7622 For memory, assume that the desired extraction_mode and pos_mode
7623 are the same as for a register operation, since at present we don't
7624 have named patterns for aligned memory structures. */
7625 struct extraction_insn insn
;
7626 if (get_best_reg_extraction_insn (&insn
, pattern
,
7627 GET_MODE_BITSIZE (inner_mode
), mode
))
7629 wanted_inner_reg_mode
= insn
.struct_mode
.require ();
7630 pos_mode
= insn
.pos_mode
;
7631 extraction_mode
= insn
.field_mode
;
7634 /* Never narrow an object, since that might not be safe. */
7636 if (mode
!= VOIDmode
7637 && GET_MODE_SIZE (extraction_mode
) < GET_MODE_SIZE (mode
))
7638 extraction_mode
= mode
;
7641 wanted_inner_mode
= wanted_inner_reg_mode
;
7644 /* Be careful not to go beyond the extracted object and maintain the
7645 natural alignment of the memory. */
7646 wanted_inner_mode
= smallest_int_mode_for_size (len
);
7647 while (pos
% GET_MODE_BITSIZE (wanted_inner_mode
) + len
7648 > GET_MODE_BITSIZE (wanted_inner_mode
))
7649 wanted_inner_mode
= GET_MODE_WIDER_MODE (wanted_inner_mode
).require ();
7654 if (BITS_BIG_ENDIAN
)
7656 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7657 BITS_BIG_ENDIAN style. If position is constant, compute new
7658 position. Otherwise, build subtraction.
7659 Note that POS is relative to the mode of the original argument.
7660 If it's a MEM we need to recompute POS relative to that.
7661 However, if we're extracting from (or inserting into) a register,
7662 we want to recompute POS relative to wanted_inner_mode. */
7663 int width
= (MEM_P (inner
)
7664 ? GET_MODE_BITSIZE (is_mode
)
7665 : GET_MODE_BITSIZE (wanted_inner_mode
));
7668 pos
= width
- len
- pos
;
7671 = gen_rtx_MINUS (GET_MODE (pos_rtx
),
7672 gen_int_mode (width
- len
, GET_MODE (pos_rtx
)),
7674 /* POS may be less than 0 now, but we check for that below.
7675 Note that it can only be less than 0 if !MEM_P (inner). */
7678 /* If INNER has a wider mode, and this is a constant extraction, try to
7679 make it smaller and adjust the byte to point to the byte containing
7681 if (wanted_inner_mode
!= VOIDmode
7682 && inner_mode
!= wanted_inner_mode
7684 && GET_MODE_SIZE (wanted_inner_mode
) < GET_MODE_SIZE (is_mode
)
7686 && ! mode_dependent_address_p (XEXP (inner
, 0), MEM_ADDR_SPACE (inner
))
7687 && ! MEM_VOLATILE_P (inner
))
7691 /* The computations below will be correct if the machine is big
7692 endian in both bits and bytes or little endian in bits and bytes.
7693 If it is mixed, we must adjust. */
7695 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7696 adjust OFFSET to compensate. */
7697 if (BYTES_BIG_ENDIAN
7698 && paradoxical_subreg_p (is_mode
, inner_mode
))
7699 offset
-= GET_MODE_SIZE (is_mode
) - GET_MODE_SIZE (inner_mode
);
7701 /* We can now move to the desired byte. */
7702 offset
+= (pos
/ GET_MODE_BITSIZE (wanted_inner_mode
))
7703 * GET_MODE_SIZE (wanted_inner_mode
);
7704 pos
%= GET_MODE_BITSIZE (wanted_inner_mode
);
7706 if (BYTES_BIG_ENDIAN
!= BITS_BIG_ENDIAN
7707 && is_mode
!= wanted_inner_mode
)
7708 offset
= (GET_MODE_SIZE (is_mode
)
7709 - GET_MODE_SIZE (wanted_inner_mode
) - offset
);
7711 inner
= adjust_address_nv (inner
, wanted_inner_mode
, offset
);
7714 /* If INNER is not memory, get it into the proper mode. If we are changing
7715 its mode, POS must be a constant and smaller than the size of the new
7717 else if (!MEM_P (inner
))
7719 /* On the LHS, don't create paradoxical subregs implicitely truncating
7720 the register unless TRULY_NOOP_TRUNCATION. */
7722 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner
),
7726 if (GET_MODE (inner
) != wanted_inner_mode
7728 || orig_pos
+ len
> GET_MODE_BITSIZE (wanted_inner_mode
)))
7734 inner
= force_to_mode (inner
, wanted_inner_mode
,
7736 || len
+ orig_pos
>= HOST_BITS_PER_WIDE_INT
7738 : (((HOST_WIDE_INT_1U
<< len
) - 1)
7743 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7744 have to zero extend. Otherwise, we can just use a SUBREG.
7746 We dealt with constant rtxes earlier, so pos_rtx cannot
7747 have VOIDmode at this point. */
7749 && (GET_MODE_SIZE (pos_mode
)
7750 > GET_MODE_SIZE (as_a
<scalar_int_mode
> (GET_MODE (pos_rtx
)))))
7752 rtx temp
= simplify_gen_unary (ZERO_EXTEND
, pos_mode
, pos_rtx
,
7753 GET_MODE (pos_rtx
));
7755 /* If we know that no extraneous bits are set, and that the high
7756 bit is not set, convert extraction to cheaper one - either
7757 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7759 if (flag_expensive_optimizations
7760 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx
))
7761 && ((nonzero_bits (pos_rtx
, GET_MODE (pos_rtx
))
7762 & ~(((unsigned HOST_WIDE_INT
)
7763 GET_MODE_MASK (GET_MODE (pos_rtx
)))
7767 rtx temp1
= simplify_gen_unary (SIGN_EXTEND
, pos_mode
, pos_rtx
,
7768 GET_MODE (pos_rtx
));
7770 /* Prefer ZERO_EXTENSION, since it gives more information to
7772 if (set_src_cost (temp1
, pos_mode
, optimize_this_for_speed_p
)
7773 < set_src_cost (temp
, pos_mode
, optimize_this_for_speed_p
))
7779 /* Make POS_RTX unless we already have it and it is correct. If we don't
7780 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7782 if (pos_rtx
== 0 && orig_pos_rtx
!= 0 && INTVAL (orig_pos_rtx
) == pos
)
7783 pos_rtx
= orig_pos_rtx
;
7785 else if (pos_rtx
== 0)
7786 pos_rtx
= GEN_INT (pos
);
7788 /* Make the required operation. See if we can use existing rtx. */
7789 new_rtx
= gen_rtx_fmt_eee (unsignedp
? ZERO_EXTRACT
: SIGN_EXTRACT
,
7790 extraction_mode
, inner
, GEN_INT (len
), pos_rtx
);
7792 new_rtx
= gen_lowpart (mode
, new_rtx
);
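/* Illustrative example (editorial sketch, assuming a little-endian 32-bit
   target with SImode word_mode and no special extraction patterns): a
   request for 8 unsigned bits at constant bit position 8 of a pseudo,

     make_extraction (SImode, (reg:SI 100), 8, NULL_RTX, 8, 1, 0, 0),

   cannot use a plain SUBREG because the position is not word-aligned,
   so it yields

     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 8)).

   The same request at position 0 takes the no-extraction path instead
   and typically comes back as (zero_extend:SI (subreg:QI (reg:SI 100) 0)).  */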
/* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
   can be commuted with any other operations in X.  Return X without
   that shift if so.  */

static rtx
extract_left_shift (scalar_int_mode mode, rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
	  && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
	}
      break;

    default:
      break;
    }

  return 0;
}
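/* Illustrative example (editorial, arbitrary pseudo numbers): with
   MODE == SImode and COUNT == 3,

     extract_left_shift (SImode,
			 (plus:SI (ashift:SI (reg:SI 100) (const_int 3))
				  (const_int 8)),
			 3)

   finds the inner shift, checks that the low 3 bits of 8 are zero, and
   returns (plus:SI (reg:SI 100) (const_int 1)) -- the original value with
   the common left shift by 3 factored out, since 8 >> 3 == 1.  */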
/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */

static rtx
make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
			     enum rtx_code in_code,
			     enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  scalar_int_mode inner_mode;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }

  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	  if (GET_CODE (new_rtx) == NEG)
	    {
	      new_rtx = XEXP (new_rtx, 0);
	      multval = -multval;
	    }
	  multval = trunc_int_for_mode (multval, mode);
	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
	}
      break;

    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else if (GET_CODE (lhs) == MULT
	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  maybe_swap_commutative_operands (x);
	  return x;
	}
      break;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else if (GET_CODE (rhs) == MULT
	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  return x;
	}

    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, 1, 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
					  &inner_mode)
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, 1, 0, in_code == COMPARE);

	  /* If we narrowed the mode when dropping the subreg, then we lose.  */
	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
	    new_rtx = NULL;

	  /* If that didn't give anything, see if the AND simplifies on
	     its own.  */
	  if (!new_rtx && i >= 0)
	    {
	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
					 0, in_code == COMPARE);
	    }
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1)),
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
						 XEXP (x, 1)));
	  new_rtx = make_compound_operation (new_rtx, in_code);
	}

      /* If we are have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, 1, 0, in_code == COMPARE);
	}

      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation
				     (XEXP (XEXP (x, 0), 0), next_code),
				     XEXP (XEXP (x, 0), 1)));
	}

      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, 1, 0, 1);

      /* If the one operand is a paradoxical subreg of a register or memory and
	 the constant (limited to the smaller mode) has only zero bits where
	 the sub expression has known zero bits, this can be expressed as
	 a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
	{
	  rtx sub;

	  sub = XEXP (XEXP (x, 0), 0);
	  machine_mode sub_mode = GET_MODE (sub);
	  if ((REG_P (sub) || MEM_P (sub))
	      && GET_MODE_PRECISION (sub_mode) < mode_width)
	    {
	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
	      unsigned HOST_WIDE_INT mask;

	      /* original AND constant with all the known zero bits set */
	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
	      if ((mask & mode_mask) == mode_mask)
		{
		  new_rtx = make_compound_operation (sub, next_code);
		  new_rtx = make_extraction (mode, new_rtx, 0, 0,
					     GET_MODE_PRECISION (sub_mode),
					     1, 0, in_code == COMPARE);
		}
	    }
	}

      break;

    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new_rtx = gen_rtx_ASHIFTRT (mode,
				      make_compound_operation (XEXP (x, 0),
							       next_code),
				      XEXP (x, 1));
	  break;
	}

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
	  && GET_CODE (lhs) == ASHIFT
	  && CONST_INT_P (XEXP (lhs, 1))
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
	  && INTVAL (XEXP (lhs, 1)) >= 0
	  && INTVAL (rhs) < mode_width)
	{
	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && CONST_INT_P (rhs)
	  && INTVAL (rhs) >= 0
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (rhs) < mode_width
	  && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
								  next_code),
				   0, NULL_RTX, mode_width - INTVAL (rhs),
				   code == LSHIFTRT, 0, in_code == COMPARE);

      break;

    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */
      {
	rtx inner = SUBREG_REG (x), simplified;
	enum rtx_code subreg_code = in_code;

	/* If the SUBREG is masking of a logical right shift,
	   make an extraction.  */
	if (GET_CODE (inner) == LSHIFTRT
	    && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
	    && CONST_INT_P (XEXP (inner, 1))
	    && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
	    && subreg_lowpart_p (x))
	  {
	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
	    int width = GET_MODE_PRECISION (inner_mode)
			- INTVAL (XEXP (inner, 1));
	    if (width > mode_width)
	      width = mode_width;
	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				       width, 1, 0, in_code == COMPARE);
	    break;
	  }

	/* If in_code is COMPARE, it isn't always safe to pass it through
	   to the recursive make_compound_operation call.  */
	if (subreg_code == COMPARE
	    && (!subreg_lowpart_p (x)
		|| GET_CODE (inner) == SUBREG
		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
		   is (const_int 0), rather than
		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
		   for non-equality comparisons against 0 is not equivalent
		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
		|| (GET_CODE (inner) == AND
		    && CONST_INT_P (XEXP (inner, 1))
		    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
		       >= GET_MODE_BITSIZE (mode) - 1)))
	  subreg_code = SET;

	tem = make_compound_operation (inner, subreg_code);

	simplified
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (inner)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
	    && subreg_lowpart_p (x))
	  {
	    rtx newer
	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    /* force_to_mode can expand compounds.  If it just re-expanded the
	       compound, use gen_lowpart to convert to the desired mode.  */
	    if (rtx_equal_p (newer, x)
		/* Likewise if it re-expanded the compound only partially.
		   This happens for SUBREG of ZERO_EXTRACT if they extract
		   the same number of bits.  */
		|| (GET_CODE (newer) == SUBREG
		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
		    && GET_CODE (inner) == AND
		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
	      return gen_lowpart (GET_MODE (x), tem);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}
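/* Illustrative example (editorial): the AND case above turns a masked
   right shift into an explicit bit-field reference.  With MODE == SImode,

     (and:SI (lshiftrt:SI (reg:SI 100) (const_int 24)) (const_int 255))

   has exact_log2 (255 + 1) == 8, so it becomes

     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 24)),

   which names the field explicitly and lets later passes match
   extv/extzv-style patterns.  */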
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */

static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  enum rtx_code next_code;
  rtx new_rtx, tem;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE || in_code == EQ ? SET : in_code);

  scalar_int_mode mode;
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
    {
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
						 &next_code);
      if (new_rtx)
	return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
					    new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
	return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	{
	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
	  SUBST (XVECEXP (x, i, j), new_rtx);
	}

  maybe_swap_commutative_operands (x);
  return x;
}
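/* Illustrative example (editorial): because NEXT_CODE stays MEM inside an
   address, a shift used for indexing,

     (mem:SI (plus:SI (ashift:SI (reg:SI 100) (const_int 2))
		      (reg:SI 101))),

   is rewritten by the ASHIFT case of make_compound_operation_int as

     (mem:SI (plus:SI (mult:SI (reg:SI 100) (const_int 4))
		      (reg:SI 101)))

   since MULT is the canonical form for scaling inside addresses.  */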
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */
static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}
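/* Illustrative example (editorial): if get_last_value shows that pseudo
   99 currently holds the same value as REG == (reg:SI 100), then

     canon_reg_for_combine ((plus:SI (reg:SI 99) (const_int 4)),
			    (reg:SI 100))

   returns (plus:SI (reg:SI 100) (const_int 4)), letting rtx_equal_p
   recognize expressions that differ only in which equivalent register
   they mention.  */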
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
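/* Illustrative example (editorial): narrowing (reg:DI 100) to SImode
   yields (subreg:SI (reg:DI 100) 0) on a little-endian target where
   SI-from-DI truncation is a no-op; on a target where it is not, an
   explicit (truncate:SI (reg:DI 100)) is generated instead.  */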
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT nonzero;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode)
    mask &= GET_MODE_MASK (op_mode);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && ((GET_MODE_SIZE (GET_MODE (x))
	   < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	  || (0 == (mask
		    & GET_MODE_MASK (GET_MODE (x))
		    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  scalar_int_mode int_mode, xmode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    /* OP_MODE is either MODE or XMODE, so it must be a scalar
       integer mode.  */
    return force_int_to_mode (x, int_mode, xmode,
			      as_a <scalar_int_mode> (op_mode),
			      mask, just_select);

  return gen_lowpart_or_truncate (mode, x);
}
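/* Illustrative example (editorial): with MASK == 0xff,

     force_to_mode ((ashift:SI (reg:SI 100) (const_int 8)), SImode, 0xff, 0)

   returns (const_int 0), because nonzero_bits proves the shift leaves
   the low eight bits clear, so none of the requested bits can ever be
   set.  */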
/* Subroutine of force_to_mode that handles cases in which both X and
   the result are scalar integers.  MODE is the mode of the result,
   XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
   is preferred for simplified versions of X.  The other arguments
   are as for force_to_mode.  */

static rtx
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
		   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
		   int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  unsigned HOST_WIDE_INT fuller_mask;
  rtx op0, op1, temp;

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));
	  xmode = op_mode;

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (xmode) != mask
	      && HWI_COMPUTABLE_MODE_P (xmode))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
				       gen_int_mode (cval, xmode));
	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
	{
	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (xmode))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       xmode);
	  temp = simplify_gen_binary (GET_CODE (x), xmode,
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* fall through */

    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	{
	  x = simplify_gen_binary (code, op_mode, op0, op1);
	  xmode = op_mode;
	}
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), op_mode,
						    mask, next_select));

      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
	  xmode = op_mode;
	}
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
	    op_mode = xmode;

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (xmode != op_mode || inner != XEXP (x, 0))
	    {
	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	      xmode = op_mode;
	    }
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (xmode))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (xmode))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
				 GEN_INT (GET_MODE_PRECISION (xmode)
					  - exact_log2 (mask + 1)));

      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (xmode, mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT nonzero;
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (xmode);
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (xmode);
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
		   GET_MODE_PRECISION (xmode) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));

    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    xmode, gen_int_mode (mask, xmode),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, xmode,
				     force_to_mode (XEXP (x, 0), xmode,
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (xmode))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode,
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
	  xmode = op_mode;
	}
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}
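/* Illustrative example (editorial): the AND case above narrows redundant
   masks.  With MASK == 0xff,

     force_int_to_mode ((and:SI (reg:SI 100) (const_int 0xffff)),
			SImode, SImode, SImode, 0xff, 0)

   first folds the constant to 0xffff & 0xff == 0xff, and since an AND
   with exactly MASK is redundant in this context, the result is just
   (reg:SI 100).  */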
9077 /* Return nonzero if X is an expression that has one of two values depending on
9078 whether some other value is zero or nonzero. In that case, we return the
9079 value that is being tested, *PTRUE is set to the value if the rtx being
9080 returned has a nonzero value, and *PFALSE is set to the other alternative.
9082 If we return zero, we set *PTRUE and *PFALSE to X. */
9085 if_then_else_cond (rtx x
, rtx
*ptrue
, rtx
*pfalse
)
9087 machine_mode mode
= GET_MODE (x
);
9088 enum rtx_code code
= GET_CODE (x
);
9089 rtx cond0
, cond1
, true0
, true1
, false0
, false1
;
9090 unsigned HOST_WIDE_INT nz
;
9091 scalar_int_mode int_mode
;
9093 /* If we are comparing a value against zero, we are done. */
9094 if ((code
== NE
|| code
== EQ
)
9095 && XEXP (x
, 1) == const0_rtx
)
9097 *ptrue
= (code
== NE
) ? const_true_rtx
: const0_rtx
;
9098 *pfalse
= (code
== NE
) ? const0_rtx
: const_true_rtx
;
9102 /* If this is a unary operation whose operand has one of two values, apply
9103 our opcode to compute those values. */
9104 else if (UNARY_P (x
)
9105 && (cond0
= if_then_else_cond (XEXP (x
, 0), &true0
, &false0
)) != 0)
9107 *ptrue
= simplify_gen_unary (code
, mode
, true0
, GET_MODE (XEXP (x
, 0)));
9108 *pfalse
= simplify_gen_unary (code
, mode
, false0
,
9109 GET_MODE (XEXP (x
, 0)));
9113 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9114 make can't possibly match and would suppress other optimizations. */
9115 else if (code
== COMPARE
)
9118 /* If this is a binary operation, see if either side has only one of two
9119 values. If either one does or if both do and they are conditional on
9120 the same value, compute the new true and false values. */
9121 else if (BINARY_P (x
))
9123 rtx op0
= XEXP (x
, 0);
9124 rtx op1
= XEXP (x
, 1);
9125 cond0
= if_then_else_cond (op0
, &true0
, &false0
);
9126 cond1
= if_then_else_cond (op1
, &true1
, &false1
);
9128 if ((cond0
!= 0 && cond1
!= 0 && !rtx_equal_p (cond0
, cond1
))
9129 && (REG_P (op0
) || REG_P (op1
)))
9131 /* Try to enable a simplification by undoing work done by
9132 if_then_else_cond if it converted a REG into something more
9137 true0
= false0
= op0
;
9142 true1
= false1
= op1
;
9146 if ((cond0
!= 0 || cond1
!= 0)
9147 && ! (cond0
!= 0 && cond1
!= 0 && !rtx_equal_p (cond0
, cond1
)))
9149 /* If if_then_else_cond returned zero, then true/false are the
9150 same rtl. We must copy one of them to prevent invalid rtl
9153 true0
= copy_rtx (true0
);
9154 else if (cond1
== 0)
9155 true1
= copy_rtx (true1
);
9157 if (COMPARISON_P (x
))
9159 *ptrue
= simplify_gen_relational (code
, mode
, VOIDmode
,
9161 *pfalse
= simplify_gen_relational (code
, mode
, VOIDmode
,
9166 *ptrue
= simplify_gen_binary (code
, mode
, true0
, true1
);
9167 *pfalse
= simplify_gen_binary (code
, mode
, false0
, false1
);
9170 return cond0
? cond0
: cond1
;
9173 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9174 operands is zero when the other is nonzero, and vice-versa,
9175 and STORE_FLAG_VALUE is 1 or -1. */
9177 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
9178 && (code
== PLUS
|| code
== IOR
|| code
== XOR
|| code
== MINUS
9180 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
9182 rtx op0
= XEXP (XEXP (x
, 0), 1);
9183 rtx op1
= XEXP (XEXP (x
, 1), 1);
9185 cond0
= XEXP (XEXP (x
, 0), 0);
9186 cond1
= XEXP (XEXP (x
, 1), 0);
9188 if (COMPARISON_P (cond0
)
9189 && COMPARISON_P (cond1
)
9190 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
9191 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
9192 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
9193 || ((swap_condition (GET_CODE (cond0
))
9194 == reversed_comparison_code (cond1
, NULL
))
9195 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
9196 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
9197 && ! side_effects_p (x
))
9199 *ptrue
= simplify_gen_binary (MULT
, mode
, op0
, const_true_rtx
);
9200 *pfalse
= simplify_gen_binary (MULT
, mode
,
9202 ? simplify_gen_unary (NEG
, mode
,
9210 /* Similarly for MULT, AND and UMIN, except that for these the result
9212 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
9213 && (code
== MULT
|| code
== AND
|| code
== UMIN
)
9214 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
9216 cond0
= XEXP (XEXP (x
, 0), 0);
9217 cond1
= XEXP (XEXP (x
, 1), 0);
9219 if (COMPARISON_P (cond0
)
9220 && COMPARISON_P (cond1
)
9221 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
9222 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
9223 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
9224 || ((swap_condition (GET_CODE (cond0
))
9225 == reversed_comparison_code (cond1
, NULL
))
9226 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
9227 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
9228 && ! side_effects_p (x
))
9230 *ptrue
= *pfalse
= const0_rtx
;
  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
	 canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
	return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
	{
	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
	  return XEXP (cond0, 0);
	}
      else
	return cond0;
    }
  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
  else if (code == SUBREG
	   && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
					       &true0, &false0)))
    {
      true0 = simplify_gen_subreg (mode, true0,
				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
	{
	  *ptrue = true0;
	  *pfalse = false0;
	  return cond0;
	}
    }
  /* If X is a constant, this isn't special and will cause confusion
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
	   || (is_a <scalar_int_mode> (mode, &int_mode)
	       && (num_sign_bit_copies (x, int_mode)
		   == GET_MODE_PRECISION (int_mode))))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */
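
/* For instance (an illustration, not part of the original source):
   known_cond ((abs R), GE, R, const0_rtx) can fold to R, since R >= 0
   implies (abs R) == R; with cond LT it would instead fold to (neg R).
   The body below handles exactly these ABS, MIN/MAX, comparison, SUBREG
   and ZERO_EXTEND shapes.  */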
static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }

  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
	{
	  std::swap (val, reg);
	  cond = swap_condition (cond);
	}

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
	{
	  if (COMPARISON_P (x))
	    {
	      if (comparison_dominates_p (cond, code))
		return const_true_rtx;

	      code = reversed_comparison_code (x, NULL);
	      if (code != UNKNOWN
		  && comparison_dominates_p (cond, code))
		return const0_rtx;
	      else
		return x;
	    }
	  else if (code == SMAX || code == SMIN
		   || code == UMIN || code == UMAX)
	    {
	      int unsignedp = (code == UMIN || code == UMAX);

	      /* Do not reverse the condition when it is NE or EQ.
		 This is because we cannot conclude anything about
		 the value of 'SMAX (x, y)' when x is not equal to y,
		 but we can when x equals y.  */
	      if ((code == SMAX || code == UMAX)
		  && ! (cond == EQ || cond == NE))
		cond = reverse_condition (cond);

	      switch (cond)
		{
		case GE:   case GT:
		  return unsignedp ? x : XEXP (x, 1);
		case LE:   case LT:
		  return unsignedp ? x : XEXP (x, 0);
		case GEU:  case GTU:
		  return unsignedp ? XEXP (x, 1) : x;
		case LEU:  case LTU:
		  return unsignedp ? XEXP (x, 0) : x;
		default:
		  break;
		}
	    }
	}
    }
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
	{
	  /* We must simplify subreg here, before we lose track of the
	     original inner_mode.  */
	  new_rtx = simplify_subreg (GET_MODE (x), r,
				     inner_mode, SUBREG_BYTE (x));
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (SUBREG_REG (x), r);
	}

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
	{
	  /* We must simplify the zero_extend here, before we lose
	     track of the original inner_mode.  */
	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					      r, inner_mode);
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (XEXP (x, 0), r);
	}

      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
						cond, reg, val));
    }

  return x;
}
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
	return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return 0;
      /* For big endian, adjust the memory offset.  */
      if (BYTES_BIG_ENDIAN)
	x = adjust_address_nv (x, GET_MODE (y),
			       -subreg_lowpart_offset (GET_MODE (x),
						       GET_MODE (y)));
      else
	x = adjust_address_nv (x, GET_MODE (y), 0);
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other case, we'll import a register
     into this insn that might already have been dead.  So fail if none of
     the above cases are true.  */
  return 0;
}
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */
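
/* A concrete instance (illustrative, not from the original source): the
   insn (set D (and (rotate (const_int -2) P) D)) clears the single bit at
   position P of D, so the first case below rewrites it as
   (set (zero_extract D (const_int 1) P) (const_int 0)).  */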
static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;

  /* All the rules in this function are specific to scalar integers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return x;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const1_rtx);
      return x;
    }

  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = -1;
      else
	ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (dest, src);
	}
    }
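
  /* Worked mask example for the ZERO_EXTRACT case above (illustrative, not
     from the original source): width 8 gives ze_mask == 0xff.  An and_mask
     of 0x1ff covers the whole field (0x1ff & 0xff == 0xff), so the source
     AND is removed; an and_mask of 0x30f has bits outside the field
     (0x30f & 0xff == 0x0f != 0x30f), so its constant is reduced to 0x0f.  */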
  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  scalar_int_mode src_mode = mode;
  if (GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
	   && paradoxical_subreg_p (XEXP (rhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
	   && CONST_INT_P (XEXP (rhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
	   && paradoxical_subreg_p (XEXP (lhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
  if (pos < 0
      || pos + len > GET_MODE_PRECISION (mode)
      || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, mode)) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
			   ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     src_mode, other, pos),
			       dest);
  src = force_to_mode (src, new_mode,
		       len >= HOST_BITS_PER_WIDE_INT
		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
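
/* Illustration of the constant-position case above (not from the original
   source): in SImode, (ior (and D (const_int 0xffff00ff)) (ashift Y 8))
   with the nonzero bits of (ashift Y 8) confined to 0xff00 has C1 with a
   single group of zeros at bits 8..15, so pos = 8 and len = 8, and the
   whole SET becomes (set (zero_extract D (const_int 8) (const_int 8)) Y),
   with Y forced to the required mode.  */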
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
	return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
	return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
}
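
/* Sanity check of the XOR/IOR exception above (illustrative, not from the
   original source): with a = 0b1100, b = 0b1010, c = 0b0001,
   (a | c) ^ (b | c) = 0b1101 ^ 0b1011 = 0b0110, and (a ^ b) & ~c
   = 0b0110 & 0b1110 = 0b0110, so the rewrite preserves the value.  */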
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and (A (not B))) (and (B (not B)))),
   which then simplifies to (and (A (not B))).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term
   that is distributed, i.e. of C in the first example above.  */

static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
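
/* For example (illustrative, not from the original source): with
   VAROP = (neg X) where X is known to be 0 or 1, and CONSTOP = 8,
   the NEG rule below rewrites the AND as (ashift X (const_int 3)),
   since (-X) & 8 is 8 when X == 1 and 0 when X == 0, i.e. X << 3.  */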
static rtx
simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    {
      scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
      return
	gen_lowpart
	  (mode,
	   apply_distributive_law
	   (simplify_gen_binary (GET_CODE (varop), varop_mode,
				 simplify_and_const_int (NULL_RTX, varop_mode,
							 XEXP (varop, 0),
							 constop),
				 simplify_and_const_int (NULL_RTX, varop_mode,
							 XEXP (varop, 1),
							 constop))));
    }

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
/* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
			      scalar_int_mode mode,
			      unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
	      && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
	 of mode, record_value_for_reg invoked nonzero_bits on the register
	 with nonzero_bits_mode (because last_set_mode is necessarily integral
	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
	 are all valid, hence in mode too since nonzero_bits_mode is defined
	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
	tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);

      *nonzero &= mask;
    }

  return NULL;
}
/* Given a reg X of mode XMODE, return the number of bits at the high-order
   end of X that are known to be equal to the sign bit.  X will be used
   in mode MODE; the returned value will always be between 1 and the
   number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
				     scalar_int_mode mode,
				     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  scalar_int_mode int_mode;
  return (unsignedp
	  ? (is_a <scalar_int_mode> (mode, &int_mode)
	     && HWI_COMPUTABLE_MODE_P (int_mode)
	     ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
			       - floor_log2 (nonzero_bits (x, int_mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
   PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
   proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
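
/* Worked instance (illustrative, not from the original source): merging
   *POP0 == XOR, *PCONST0 == b with OP1 == AND, CONST1 == b uses the rule
   (a & b) ^ b == (~a) & b, so *POP0 becomes AND and *PCOMP_P is set.
   E.g. for a = 0b1100, b = 0b1010: (a & b) ^ b == 0b1000 ^ 0b1010
   == 0b0010 == (~a) & b.  */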
static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
		 enum rtx_code op1, HOST_WIDE_INT const1,
		 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
	{
	case AND:
	  const0 &= const1;
	  break;
	case IOR:
	  const0 |= const1;
	  break;
	case XOR:
	  const0 ^= const1;
	  break;
	case PLUS:
	  const0 += const1;
	  break;
	case NEG:
	  op0 = UNKNOWN;
	  break;
	default:
	  break;
	}
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
	if (op1 == AND)
	  /* (a & b) | b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) | b == a | b */
	  {;}
	break;

      case XOR:
	if (op1 == AND)
	  /* (a & b) ^ b == (~a) & b */
	  op0 = AND, *pcomp_p = 1;
	else /* op1 == IOR */
	  /* (a | b) ^ b == a & ~b */
	  op0 = AND, const0 = ~const0;
	break;

      case AND:
	if (op1 == IOR)
	  /* (a | b) & b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) & b == (~a) & b */
	  *pcomp_p = 1;
	break;
      default:
	break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
	   && op0 == AND)
    op0 = UNKNOWN;

  *pop0 = op0;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  return 1;
}
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static machine_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
		      machine_mode orig_mode, machine_mode mode,
		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
	 to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
	  > (unsigned) (GET_MODE_PRECISION (mode)
			- GET_MODE_PRECISION (orig_mode)))
	return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
	return mode;

      /* We can also widen if the bits brought in will be masked off.  This
	 operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
	{
	  int care_bits = low_bitmask_len (orig_mode, outer_const);

	  if (care_bits >= 0
	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
	    return mode;
	}
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
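
/* Example (illustrative, not from the original source): an LSHIFTRT by 3
   performed in QImode can be widened to SImode when nonzero_bits shows the
   operand fits in the low 8 bits, since the bits shifted in from above are
   then already known to be zero.  */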
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
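
/* Illustration of the working form below (not from the original source):
   the loop accumulates (outer_op (code varop count) outer_const); e.g.
   (lshiftrt (and X (const_int 0xf0)) 4) becomes CODE == LSHIFTRT,
   VAROP == X, COUNT == 4 with OUTER_OP == AND and OUTER_CONST == 0xf,
   the logical operation having been moved outside the shift.  */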
static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  machine_mode mode = result_mode;
  machine_mode shift_mode;
  scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure and truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
	return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
	{
	  unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
	  code = ROTATE;
	  count = bitsize - count;
	}

      shift_mode = result_mode;
      if (shift_mode != mode)
	{
	  /* We only change the modes of scalar shifts.  */
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
	  shift_mode = try_widen_shift_mode (code, varop, count,
					     int_result_mode, int_mode,
					     outer_op, outer_const);
	}

      scalar_int_mode shift_unit_mode
	= as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));

      /* Handle cases where the count is greater than the size of the mode
	 minus 1.  For ASHIFT, use the size minus one as the count (this can
	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
	 take the count modulo the size.  For other shifts, the result is
	 zero.

	 Since these shifts are being produced by the compiler by combining
	 multiple operations, each of which are defined, we know what the
	 result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
	{
	  if (code == ASHIFTRT)
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
	  else if (code == ROTATE || code == ROTATERT)
	    count %= GET_MODE_PRECISION (shift_unit_mode);
	  else
	    {
	      /* We can't simply return zero because there may be an
		 outer op.  */
	      varop = const0_rtx;
	      count = 0;
	      break;
	    }
	}

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
	 here would cause an infinite loop.  */
      if (complement_p)
	break;

      if (shift_mode == shift_unit_mode)
	{
	  /* An arithmetic right shift of a quantity known to be -1 or 0
	     is a no-op.  */
	  if (code == ASHIFTRT
	      && (num_sign_bit_copies (varop, shift_unit_mode)
		  == GET_MODE_PRECISION (shift_unit_mode)))
	    {
	      count = 0;
	      break;
	    }

	  /* If we are doing an arithmetic right shift and discarding all but
	     the sign bit copies, this is equivalent to doing a shift by the
	     bitsize minus one.  Convert it into that shift because it will
	     often allow other simplifications.  */

	  if (code == ASHIFTRT
	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
		  >= GET_MODE_PRECISION (shift_unit_mode)))
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;

	  /* We simplify the tests below and elsewhere by converting
	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
	     `make_compound_operation' will convert it to an ASHIFTRT for
	     those machines (such as VAX) that don't have an LSHIFTRT.  */
	  if (code == ASHIFTRT
	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
	      && val_signbit_known_clear_p (shift_unit_mode,
					    nonzero_bits (varop,
							  shift_unit_mode)))
	    code = LSHIFTRT;

	  if (((code == LSHIFTRT
		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
	       || (code == ASHIFT
		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		   && !((nonzero_bits (varop, shift_unit_mode) << count)
			& GET_MODE_MASK (shift_unit_mode))))
	      && !side_effects_p (varop))
	    varop = const0_rtx;
	}
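
      /* For instance (illustrative, not from the original source): if
	 nonzero_bits (VAROP) is 0xff and CODE is LSHIFTRT with COUNT >= 8,
	 every set bit is shifted out, so the test above replaces VAROP
	 with const0_rtx provided it has no side effects.  */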
      switch (GET_CODE (varop))
	{
	case SIGN_EXTEND:
	case ZERO_EXTEND:
	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  new_rtx = expand_compound_operation (varop);
	  if (new_rtx != varop)
	    {
	      varop = new_rtx;
	      continue;
	    }
	  break;

	case MEM:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);

	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
	     minus the width of a smaller mode, we can do this with a
	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && ! mode_dependent_address_p (XEXP (varop, 0),
					     MEM_ADDR_SPACE (varop))
	      && ! MEM_VOLATILE_P (varop)
	      && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
		  .exists (&tmode)))
	    {
	      new_rtx = adjust_address_nv (varop, tmode,
					   BYTES_BIG_ENDIAN ? 0
					   : count / BITS_PER_UNIT);

	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
				     : ZERO_EXTEND, int_mode, new_rtx);
	      count = 0;
	      continue;
	    }
	  break;

	case SUBREG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
	     the same number of words as what we've seen so far.  Then store
	     the widest mode in MODE.  */
	  if (subreg_lowpart_p (varop)
	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
	      && (unsigned int) ((GET_MODE_SIZE (inner_mode)
				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		 == mode_words
	      && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
	    {
	      varop = SUBREG_REG (varop);
	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
		mode = inner_mode;
	      continue;
	    }
	  break;

	case MULT:
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case UDIV:
	  /* Similar, for when divides are cheaper.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If we are extracting just the sign bit of an arithmetic
	     right shift, that shift is not needed.  However, the sign
	     bit of a wider mode may be different from what would be
	     interpreted as the sign bit in a narrower mode, so, if
	     the result is narrower, don't discard the shift.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
	      && (GET_MODE_UNIT_BITSIZE (result_mode)
		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* fall through */

	case LSHIFTRT:
	case ASHIFT:
	case ROTATE:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* Here we have two nested shifts.  The result is usually the
	     AND of a new shift with a mask.  We compute the result below.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && INTVAL (XEXP (varop, 1)) >= 0
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode))
	    {
	      enum rtx_code first_code = GET_CODE (varop);
	      unsigned int first_count = INTVAL (XEXP (varop, 1));
	      unsigned HOST_WIDE_INT mask;
	      rtx mask_rtx;

	      /* We have one common special case.  We can't do any merging if
		 the inner code is an ASHIFTRT of a smaller mode.  However, if
		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
		 we can convert it to
		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
		 This simplifies certain SIGN_EXTEND operations.  */
	      if (code == ASHIFT && first_code == ASHIFTRT
		  && count == (GET_MODE_PRECISION (int_result_mode)
			       - GET_MODE_PRECISION (int_varop_mode)))
		{
		  /* C3 has the low-order C1 bits zero.  */

		  mask = GET_MODE_MASK (int_mode)
			 & ~((HOST_WIDE_INT_1U << first_count) - 1);

		  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
						  XEXP (varop, 0), mask);
		  varop = simplify_shift_const (NULL_RTX, ASHIFT,
						int_result_mode, varop, count);
		  count = first_count;
		  code = ASHIFTRT;
		  continue;
		}

	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
		 than C1 high-order bits equal to the sign bit, we can convert
		 this to either an ASHIFT or an ASHIFTRT depending on the
		 two counts.

		 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */

	      if (code == ASHIFTRT && first_code == ASHIFT
		  && int_varop_mode == shift_unit_mode
		  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
		      > first_count))
		{
		  varop = XEXP (varop, 0);
		  count -= first_count;
		  if (count < 0)
		    {
		      count = -count;
		      code = ASHIFT;
		    }

		  continue;
		}

	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
		 we can only do this if FIRST_CODE is also ASHIFTRT.

		 We can't do the case when CODE is ROTATE and FIRST_CODE is
		 ASHIFTRT.

		 If the mode of this shift is not the mode of the outer shift,
		 we can't do this if either shift is a right shift or ROTATE.

		 Finally, we can't do any of these if the mode is too wide
		 unless the codes are the same.

		 Handle the case where the shift codes are the same
		 first.  */

	      if (code == first_code)
		{
		  if (int_varop_mode != int_result_mode
		      && (code == ASHIFTRT || code == LSHIFTRT
			  || code == ROTATE))
		    break;

		  count += first_count;
		  varop = XEXP (varop, 0);
		  continue;
		}

	      if (code == ASHIFTRT
		  || (code == ROTATE && first_code == ASHIFTRT)
		  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
		  || (int_varop_mode != int_result_mode
		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
			  || first_code == ROTATE
			  || code == ROTATE)))
		break;

	      /* To compute the mask to apply after the shift, shift the
		 nonzero bits of the inner shift the same way the
		 outer shift will.  */

	      mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
				       int_result_mode);

	      mask_rtx
		= simplify_const_binary_operation (code, int_result_mode,
						   mask_rtx, GEN_INT (count));

	      /* Give up if we can't compute an outer operation to use.  */
	      if (mask_rtx == 0
		  || !CONST_INT_P (mask_rtx)
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					int_result_mode, &complement_p))
		break;

	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
	      if ((code == ASHIFTRT || code == LSHIFTRT)
		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
		count += first_count;
	      else
		count -= first_count;

	      /* If COUNT is positive, the new shift is usually CODE,
		 except for the two exceptions below, in which case it is
		 FIRST_CODE.  If the count is negative, FIRST_CODE should
		 always be used.  */
	      if (count > 0
		  && ((first_code == ROTATE && code == ASHIFT)
		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
		code = first_code;
	      else if (count < 0)
		code = first_code, count = -count;

	      varop = XEXP (varop, 0);
	      continue;
	    }
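
	  /* Worked instance of the merging above (illustrative, not from
	     the original source): for (ashift (lshiftrt X 2) 3) in SImode,
	     the directions differ so COUNT becomes 3 - 2 = 1, and the mask
	     is the inner shift's nonzero bits shifted left by 3, giving
	     (and (ashift X 1) -8) -- i.e. (X >> 2) << 3 == (X << 1) & ~7.  */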
	  /* If we have (A << B << C) for any shift, we can convert this to
	     (A << C << B).  This wins if A is a constant.  Only try this if
	     B is not a constant.  */

	  else if (GET_CODE (varop) == code
		   && CONST_INT_P (XEXP (varop, 0))
		   && !CONST_INT_P (XEXP (varop, 1)))
	    {
	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
		 sure the result will be masked.  See PR70222.  */
	      if (code == LSHIFTRT
		  && int_mode != int_result_mode
		  && !merge_outer_ops (&outer_op, &outer_const, AND,
				       GET_MODE_MASK (int_result_mode)
				       >> orig_count, int_result_mode,
				       &complement_p))
		break;
	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
		 up outer sign extension (often left and right shift) is
		 hardly more efficient than the original.  See PR70429.  */
	      if (code == ASHIFTRT && int_mode != int_result_mode)
		break;

	      rtx new_rtx = simplify_const_binary_operation (code, int_mode,
							     XEXP (varop, 0),
							     GEN_INT (count));
	      varop = gen_rtx_fmt_ee (code, int_mode, new_rtx,
				      XEXP (varop, 1));
	      count = 0;
	      continue;
	    }
	  break;

	case NOT:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Make this fit the case below.  */
	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
	  continue;

	case IOR:
	case AND:
	case XOR:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have an (le X 0) operation.  If we have an arithmetic shift
	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }

	  /* If we have (shift (logical)), move the logical to the outside
	     to allow it to possibly combine with another logical and the
	     shift to combine with another shift.  This also canonicalizes to
	     what a ZERO_EXTRACT looks like.  Also, some machines have
	     (and (shift)) insns.  */

	  if (CONST_INT_P (XEXP (varop, 1))
	      /* We can't do this if we have (ashiftrt (xor)) and the
		 constant has its sign bit set in shift_unit_mode with
		 shift_unit_mode wider than result_mode.  */
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && int_result_mode != shift_unit_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_unit_mode))
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we can't do that, try to simplify the shift in each arm of the
	     logical expression, make a new logical expression, and apply
	     the inverse distributive law.  This also can't be done for
	     (ashiftrt (xor)) where we've widened the shift and the constant
	     changes the sign bit.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && int_result_mode != shift_unit_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_unit_mode)))
	    {
	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
					      XEXP (varop, 0), count);
	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
					      XEXP (varop, 1), count);

	      varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);
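
	      /* E.g. (illustration, not from the original source):
		 (lshiftrt (and X Y) 2) becomes (and (lshiftrt X 2)
		 (lshiftrt Y 2)), which apply_distributive_law may then
		 collapse when the two shifted arms share structure.  */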
	      count = 0;
	      continue;
	    }
	  break;

	case EQ:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
	     says that the sign bit can be tested, FOO has mode MODE, C is
	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
	     that may be nonzero.  */
	  if (code == LSHIFTRT
	      && XEXP (varop, 1) == const0_rtx
	      && GET_MODE (XEXP (varop, 0)) == int_result_mode
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
	      && STORE_FLAG_VALUE == -1
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
				  int_result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }
	  break;

	case NEG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
	     than the number of bits in the mode is equivalent to A.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
	     NEG outside to allow shifts to combine.  */
	  if (code == ASHIFT
	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
				  int_result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
	     is one less than the number of bits in the mode is
	     equivalent to (xor A 1).  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && XEXP (varop, 1) == constm1_rtx
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
				  int_result_mode, &complement_p))
	    {
	      count = 0;
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and those
	     bits are known zero in FOO, we can replace the PLUS with FOO.
	     Similarly in the other operand order.  This code occurs when
	     we are computing the size of a variable-size array.  */

	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && count < HOST_BITS_PER_WIDE_INT
	      && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
	      && (nonzero_bits (XEXP (varop, 1), int_result_mode)
		  & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  else if ((code == ASHIFTRT || code == LSHIFTRT)
		   && count < HOST_BITS_PER_WIDE_INT
		   && HWI_COMPUTABLE_MODE_P (int_result_mode)
		   && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
			    >> count)
		   && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
			    & nonzero_bits (XEXP (varop, 1),
					    int_result_mode)))
	    {
	      varop = XEXP (varop, 1);
	      continue;
	    }

	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
	  if (code == ASHIFT
	      && CONST_INT_P (XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (ASHIFT, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical).  See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && CONST_INT_P (XEXP (varop, 1))
	      && mode_signbit_p (int_result_mode, XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  break;

	case MINUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */

	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }
	  break;

	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);

	      varop_inner
		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
				    XEXP (varop_inner, 0),
				    GEN_INT
				    (count + INTVAL (XEXP (varop_inner, 1))));
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }

  shift_mode = result_mode;
  if (shift_mode != mode)
    {
      /* We only change the modes of scalar shifts.  */
      int_mode = as_a <scalar_int_mode> (mode);
      int_result_mode = as_a <scalar_int_mode> (result_mode);
      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
					 int_mode, outer_op, outer_const);
    }

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    /* We only change the modes of scalar shifts.  */
    x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
				x, GET_MODE_MASK (result_mode) >> orig_count);

  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      int_result_mode = as_a <scalar_int_mode> (result_mode);

      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, int_result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, int_result_mode, x,
				int_result_mode);
      else
	x = simplify_gen_binary (outer_op, int_result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
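/* For example (as used by simplify_comparison below), an equality test of
   an arithmetic shift against zero can be rewritten with
     simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (op0, 0),
			   INTVAL (XEXP (op0, 1)));
   passing NULL_RTX for X requests that a fallback shift expression be
   synthesized when no simplification is found.  */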
static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets, these will be killed by followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
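  /* An illustrative sketch of what this produces (the clobbered register
     below is hypothetical): if recog matched
	 (set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
     only with one clobber added, the pattern becomes
	 (parallel [(set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
		    (clobber (reg:CC 17))])
     and the combination is only valid if the clobbered register is dead
     here; otherwise we fail by returning -1 below.  */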
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */
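/* For example (illustrative, BITS_BIG_ENDIAN == 0):
     (zero_extract:SI (reg:SI x) (const_int 8) (const_int 8))
   becomes
     (and:SI (lshiftrt:SI (reg:SI x) (const_int 8)) (const_int 255))
   and (zero_extend:SI (subreg:QI (reg:SI x) 0)) becomes
     (and:SI (reg:SI x) (const_int 255)).  */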
static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      scalar_int_mode mode, inner_mode;
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
	continue;
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (inner_mode) - size - start;

	  if (start != 0)
	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
	  else
	    x = XEXP (x, 0);

	  if (mode != inner_mode)
	    x = gen_lowpart_SUBREG (mode, x);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
	       && !paradoxical_subreg_p (XEXP (x, 0))
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = SUBREG_REG (XEXP (x, 0));
	  if (GET_MODE (x) != mode)
	    x = gen_lowpart_SUBREG (mode, x);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0))
	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      if (!(GET_CODE (x) == LSHIFTRT
	    && CONST_INT_P (XEXP (x, 1))
	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
	{
	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
	}

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  scalar_int_mode mode;
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      if (offset != 0)
	y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      else
	y = SET_SRC (pat);
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */
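/* A sketch of a typical call site, as in try_combine (variable names
   illustrative):

     rtx new_notes;
     int icode = recog_for_combine (&newpat, i3, &new_notes);
     if (icode < 0)
       ... undo the attempted combination ...

   NEWPAT may have been rewritten in place on success.  */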
static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
	undo_to_marker (marker);
    }

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */
static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  unsigned int osize = GET_MODE_SIZE (omode);
  unsigned int isize = GET_MODE_SIZE (imode);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
      && ! (CONST_SCALAR_INT_P (x) || isize == osize))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;

      isize = GET_MODE_SIZE (imode);
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      int offset = 0;

      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (paradoxical_subreg_p (omode, imode))
	return gen_rtx_SUBREG (omode, x, 0);
      if (WORDS_BIG_ENDIAN)
	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);

      /* Adjust the address so that the address-after-the-data is
	 unchanged.  */
      if (BYTES_BIG_ENDIAN)
	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);

      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode).require ();
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL_RTX)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */
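/* For example (illustrative, 32-bit int_mode): (ltu X (const_int
   0x80000000)) tests that the sign bit of X is clear, so it is
   canonicalized to (ge X (const_int 0)); likewise (geu X (const_int
   0x80000000)) becomes (lt X (const_int 0)).  */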
static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  scalar_int_mode int_mode;
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
      && (nonzero_bits (op0, int_mode)
	  == (unsigned HOST_WIDE_INT) (const_op
				       & GET_MODE_MASK (int_mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U
		      << (GET_MODE_PRECISION (int_mode) - 1))) == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U
		      << (GET_MODE_PRECISION (int_mode) - 1))) == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U
		      << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == ((HOST_WIDE_INT_1U
			<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U
		      << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == (HOST_WIDE_INT_1U
		       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */
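/* Two small examples of the kind of rewriting done here (illustrative):
   (eq (not X) (const_int -2)) becomes (eq X (const_int 1)) by taking
   the NOT of the constant, and (eq (neg X) (const_int -5)) becomes
   (eq X (const_int 5)) by taking its NEG.  */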
static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  scalar_int_mode mode, inner_mode, tmode;
  opt_scalar_int_mode tmode_iter;
  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
	  && (is_a <scalar_int_mode>
	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (mode)
		  - GET_MODE_PRECISION (inner_mode))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}
      /* If both operands are the same constant shift, see if we can ignore
	 the shift.  We can if the shift is a rotate or if the bits shifted
	 out of this shift are known to be zero for both inputs and if the
	 type of comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;
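	  /* E.g. (illustrative, QImode): comparing (lshiftrt X 2) with
	     (lshiftrt Y 2) uses mask (0xff >> 2) << 2 == 0xfc; if neither
	     X nor Y has nonzero bits outside 0xfc, the shifted-out bits
	     are zero in both and we may compare X with Y directly.  */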
	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
		  <= HOST_BITS_PER_WIDE_INT)
	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					      GET_MODE (SUBREG_REG
							(inner_op0)))))
	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					      GET_MODE (SUBREG_REG
							(inner_op1))))))
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    FOR_EACH_MODE_UNTIL (tmode,
				 as_a <scalar_int_mode> (GET_MODE (op0)))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (! changed)
	    break;
	}
      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }
  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */
  while (CONST_INT_P (op1))
    {
      machine_mode raw_mode = GET_MODE (op0);
      scalar_int_mode int_mode;
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (raw_mode) != MODE_INT
	  && ! (raw_mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, raw_mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p
	  && is_a <scalar_int_mode> (raw_mode, &int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	op0 = force_to_mode (op0, int_mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (int_mode) - 1),
			     0);
      if (COMPARISON_P (op0))
	{
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */
	  if (const_op != 0
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;
	}

      if (raw_mode == VOIDmode)
	break;
      scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* fall through */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;

	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;

	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
		      == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;

	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two
	     conditions must be true in order for this to be valid:

	     1. The mode extension results in the same bit pattern being added
		on both sides and the comparison is equality or unsigned.  As
		C2 has been truncated to fit in MODE, the pattern can only be
		all 0s or all 1s.

	     2. The mode extension results in the sign bit being copied on
		each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */
	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
					 &inner_mode)
	      && GET_MODE_PRECISION (inner_mode) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		      < HOST_WIDE_INT_1U << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && ((0 == (nonzero_bits (a, inner_mode)
			      & ~GET_MODE_MASK (mode))
			&& const_op >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		      < HOST_WIDE_INT_1U << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }
	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (paradoxical_subreg_p (op0))
	    ;
	  else if (subreg_lowpart_p (op0)
		   && GET_MODE_CLASS (mode) == MODE_INT
		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
		   && (code == NE || code == EQ)
		   && GET_MODE_PRECISION (inner_mode)
		      <= HOST_BITS_PER_WIDE_INT
		   && !paradoxical_subreg_p (op0)
		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		       & ~GET_MODE_MASK (mode)) == 0)
	    {
	      /* Remove outer subregs that don't do anything.  */
	      tem = gen_lowpart (inner_mode, op1);

	      if ((nonzero_bits (tem, inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
		{
		  op0 = SUBREG_REG (op0);
		  op1 = tem;
		  continue;
		}
	      break;
	    }
	  else
	    break;

	  /* FALLTHROUGH */

	case ZERO_EXTEND:
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							op1, XEXP (op0, 1))))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;

	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							XEXP (op0, 0), op1)))
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;

	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (XOR, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;

	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }

	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0)))
			  != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }
	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }
	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	       (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	       -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && int_mode_for_size (i, 1).exists (&tmode))
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }
	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if ((is_a <scalar_int_mode>
		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND no longer masks the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }
	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }
	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			     << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;

	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of
	     the low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* FALLTHROUGH */
	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't
	     known to be zero, if the comparison is >= or < we can use the
	     same optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op = (unsigned HOST_WIDE_INT) const_op
			     << INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
	In that case we know those bits are zeros or ones.  We must also be
	sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */
  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && is_int_mode (GET_MODE (op0), &mode)
      && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (inner_mode, op1);
	    }
	}
      else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
	{
	  tem = gen_lowpart (inner_mode, op1);

	  if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }
  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
	tmode = tmode_iter.require ();
	if (!HWI_COMPUTABLE_MODE_P (tmode))
	  break;
	if (have_insn_for (COMPARE, tmode))
	  {
	    int zero_extended;

	    /* If this is a test for negative, we can make an explicit
	       test of the sign bit.  Test this first so we can use
	       a paradoxical subreg to extend OP0.  */

	    if (op1 == const0_rtx && (code == LT || code == GE)
		&& HWI_COMPUTABLE_MODE_P (mode))
	      {
		unsigned HOST_WIDE_INT sign
		  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode, op0),
					   gen_int_mode (sign, tmode));
		code = (code == LT) ? NE : EQ;
		break;
	      }

	    /* If the only nonzero bits in OP0 and OP1 are those in the
	       narrower mode and this is an equality or unsigned comparison,
	       we can use the wider mode.  Similarly for sign-extended
	       values, in which case it is true for all comparisons.  */
	    zero_extended = ((code == EQ || code == NE
			      || code == GEU || code == GTU
			      || code == LEU || code == LTU)
			     && (nonzero_bits (op0, tmode)
				 & ~GET_MODE_MASK (mode)) == 0
			     && ((CONST_INT_P (op1)
				  || (nonzero_bits (op1, tmode)
				      & ~GET_MODE_MASK (mode)) == 0)));

	    if (zero_extended
		|| ((num_sign_bit_copies (op0, tmode)
		     > (unsigned int) (GET_MODE_PRECISION (tmode)
				       - GET_MODE_PRECISION (mode)))
		    && (num_sign_bit_copies (op1, tmode)
			> (unsigned int) (GET_MODE_PRECISION (tmode)
					  - GET_MODE_PRECISION (mode)))))
	      {
		/* If OP0 is an AND and we don't have an AND in MODE either,
		   make a new AND in the proper mode.  */
		if (GET_CODE (op0) == AND
		    && !have_insn_for (AND, mode))
		  op0 = simplify_gen_binary (AND, tmode,
					     gen_lowpart (tmode,
							  XEXP (op0, 0)),
					     gen_lowpart (tmode,
							  XEXP (op0, 1)));

		if (zero_extended)
		  {
		    op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op1, mode);
		  }
		else
		  {
		    op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op1, mode);
		  }
		break;
	      }
	  }
      }
  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */
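/* E.g. (illustrative): for X == (plus (reg A) (reg A)) the operands are
   shared, so this returns 1 + 2 * count_rtxs (reg A) == 3, counting the
   shared subexpression twice to reflect its substitution cost.  */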
static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x == XEXP (x0, 0)
			     ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpressions we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.  */

      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
	rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}
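/* Worked example (illustrative only): for an increment such as "x++", i.e.

       (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1)))

   VALUE mentions REG, so the previously recorded value of (reg 100) is
   substituted into VALUE before it is stored; repeated increments thus
   fold into one growing expression, and once count_rtxs reports more
   than MAX_LAST_VALUE_RTL rtxs the recorded value degrades to
   (clobber (const_int 0)) rather than growing without bound.  */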
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle SUBREG in
	 some cases.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      gen_lowpart (GET_MODE (dest),
					   SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
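/* Illustrative example: on a 64-bit target, a lowpart store such as

       (set (subreg:SI (reg:DI 100) 0) (reg:SI 101))

   takes the SUBREG branch above, so the value recorded for (reg:DI 100)
   is gen_lowpart (DImode, (reg:SI 101)), i.e. the narrow source viewed
   in the register's own mode; a store through any other SUBREG simply
   invalidates the register's recorded value.  */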
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno
	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
	{
	  links = links->next;
	  continue;
	}

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
	{
	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
	}

      if (REG_P (SET_SRC (set)))
	{
	  regno = REGNO (SET_SRC (set));
	  links = LOG_LINKS (insn);
	}
      else
	break;
    }
}
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
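/* Illustrative example: if rsp->truncated_to_mode for (reg:DI 100) is
   SImode in the current EBB, the size test above makes the function
   return true for SImode and anything wider, while a narrower request
   such as HImode is decided by TRULY_NOOP_TRUNCATION_MODES_P (HImode,
   SImode) on the target.  */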
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TRULY_NOOP_TRUNCATION targets we might be able
   to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
	return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
	return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || (GET_MODE_SIZE (truncated_mode)
	  < GET_MODE_SIZE (rsp->truncated_to_mode)))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
	switch (format[i])
	  {
	  case 'e':
	    check_promoted_subreg (insn, XEXP (x, i));
	    break;
	  case 'V':
	  case 'E':
	    if (XVEC (x, i) != 0)
	      for (j = 0; j < XVECLEN (x, i); j++)
		check_promoted_subreg (insn, XVECEXP (x, i, j));
	    break;
	  }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  if (rsp->last_set_invalid
	      /* If this is a pseudo-register that was only set once and not
		 live at the beginning of the function, it is always valid.  */
	      || (! (regno >= FIRST_PSEUDO_REGISTER
		     && regno < reg_n_sets_max
		     && REG_N_SETS (regno) == 1
		     && (!REGNO_REG_SET_P
			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
			  regno)))
		  && rsp->last_set_label > tick))
	    {
	      if (replace)
		*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
	      return replace;
	    }
	}

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  /* Check for identical subexpressions.  If x contains
	     identical subexpression we only have to traverse one of
	     them.  */
	  if (i == 1 && ARITHMETIC_P (x))
	    {
	      /* Note that at this point x0 has already been checked
		 and found valid.  */
	      rtx x0 = XEXP (x, 0);
	      rtx x1 = XEXP (x, 1);

	      /* If x0 and x1 are identical then x is also valid.  */
	      if (x0 == x1)
		return 1;

	      /* If x1 is identical to a subexpression of x0 then
		 while checking x0, x1 has already been checked.  Thus
		 it is valid and so is x.  */
	      if (ARITHMETIC_P (x0)
		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
		return 1;

	      /* If x0 is identical to a subexpression of x1 then x is
		 valid iff the rest of x1 is valid.  */
	      if (ARITHMETIC_P (x1)
		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
		return
		  get_last_value_validate (&XEXP (x1,
						  x0 == XEXP (x1, 0) ? 1 : 0),
					   insn, tick, replace);
	    }

	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
				       replace) == 0)
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (get_last_value_validate (&XVECEXP (x, i, j),
				       insn, tick, replace) == 0)
	    return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
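/* Illustrative example: with REPLACE nonzero, a remembered value

       (plus:SI (reg:SI 65) (const_int 4))

   whose (reg:SI 65) has since been set again in this block becomes

       (plus:SI (clobber (const_int 0)) (const_int 4))

   which still tells later queries the value's shape (a PLUS of something
   and 4) even though the register's contents are no longer known.  */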
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
	  && (regno < FIRST_PSEUDO_REGISTER
	      || regno >= reg_n_sets_max
	      || REG_N_SETS (regno) != 1
	      || REGNO_REG_SET_P
		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (GET_MODE_PRECISION (rsp->last_set_mode)
      < GET_MODE_PRECISION (GET_MODE (x)))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
/* Return nonzero if expression X refers to a REG or to memory
   that is set in an instruction more recent than FROM_LUID.  */

static int
use_crosses_set_p (const_rtx x, int from_luid)
{
  const char *fmt;
  int i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned endreg = END_REGNO (x);

#ifdef PUSH_ROUNDING
      /* Don't allow uses of the stack pointer to be moved,
	 because we don't know whether the move crosses a push insn.  */
      if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
	return 1;
#endif
      for (; regno < endreg; regno++)
	{
	  reg_stat_type *rsp = &reg_stat[regno];
	  if (rsp->last_set
	      && rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) > from_luid)
	    return 1;
	}
      return 0;
    }

  if (code == MEM && mem_last_set > from_luid)
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && use_crosses_set_p (XEXP (x, i), from_luid))
	return 1;
    }

  return 0;
}
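/* Illustrative example: an expression using (reg 100) must not be moved
   to the position of an insn with luid 10 when (reg 100) was last set at
   luid 11 in the same basic block; use_crosses_set_p reports exactly that
   situation.  Memory uses are handled pessimistically through
   mem_last_set because combine keeps no alias information.  */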
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
	  return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
	    return 1;

	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
	  if (reg_dead_flag)
	    return reg_dead_flag == 1 ? 1 : 0;

	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
	    return 1;
	}

      if (insn == BB_HEAD (block))
	break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
	 address as used.  */
      if (MEM_P (XEXP (x, 0)))
	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* None of this applies to the stack, frame or arg pointers.  */
	  if (regno == STACK_POINTER_REGNUM
	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
		  && regno == HARD_FRAME_POINTER_REGNUM)
	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
	      || regno == FRAME_POINTER_REGNUM)
	    return;

	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
	}
      return;

    case SET:
      {
	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
	   the address.  */
	rtx testreg = SET_DEST (x);

	while (GET_CODE (testreg) == SUBREG
	       || GET_CODE (testreg) == ZERO_EXTRACT
	       || GET_CODE (testreg) == STRICT_LOW_PART)
	  testreg = XEXP (testreg, 0);

	if (MEM_P (testreg))
	  mark_used_regs_combine (XEXP (testreg, 0));

	mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  mark_used_regs_combine (XEXP (x, i));
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      mark_used_regs_combine (XVECEXP (x, i, j));
	  }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

static rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
	     rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
	  && ! reg_referenced_p (x, maybe_kill_insn))
	return;

      if (where_dead
	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
	  && DF_INSN_LUID (where_dead) >= from_luid
	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
	{
	  rtx note = remove_death (regno, where_dead);

	  /* It is possible for the call above to return 0.  This can occur
	     when last_death points to I2 or I1 that we combined with.
	     In that case make a new note.

	     We must also check for the case where X is a hard register
	     and NOTE is a death note for a range of hard registers
	     including X.  In that case, we must put REG_DEAD notes for
	     the remaining registers in place of NOTE.  */

	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
	      && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
		  > GET_MODE_SIZE (GET_MODE (x))))
	    {
	      unsigned int deadregno = REGNO (XEXP (note, 0));
	      unsigned int deadend = END_REGNO (XEXP (note, 0));
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i;

	      for (i = deadregno; i < deadend; i++)
		if (i < regno || i >= ourend)
		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
	    }

	  /* If we didn't find any note, or if we found a REG_DEAD note that
	     covers only part of the given reg, and we have a multi-reg hard
	     register, then to be safe we must check for REG_DEAD notes
	     for each register other than the first.  They could have
	     their own REG_DEAD notes lying around.  */
	  else if ((note == 0
		    || (note != 0
			&& (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
			    < GET_MODE_SIZE (GET_MODE (x)))))
		   && regno < FIRST_PSEUDO_REGISTER
		   && REG_NREGS (x) > 1)
	    {
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i, offset;
	      rtx oldnotes = 0;

	      if (note)
		offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
	      else
		offset = 1;

	      for (i = regno + offset; i < ourend; i++)
		move_deaths (regno_reg_rtx[i],
			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
	    }

	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
	    {
	      XEXP (note, 1) = *pnotes;
	      *pnotes = note;
	    }
	  else
	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
	}

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	 that accesses one word of a multi-word item, some
	 piece of every register in the expression is used by
	 this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
	  || GET_CODE (dest) == STRICT_LOW_PART
	  || (GET_CODE (dest) == SUBREG
	      && (((GET_MODE_SIZE (GET_MODE (dest))
		    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
		  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
	{
	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
	  return;
	}

      /* If this is some other SUBREG, we know it replaces the entire
	 value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
	dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
	 For a REG (the only other possibility), the entire value is
	 being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
		     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
			 to_insn, pnotes);
	}
      else if (fmt[i] == 'e')
	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));
      else
	return 0;

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))
	return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
	return 1;

  return 0;
}
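/* Illustrative example: for the pattern

       (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
	    (reg:SI 101))

   reg_bitfield_target_p ((reg:SI 100), body) returns 1: the register is
   assigned through a bit-field, so it counts as set at that insn even
   though only part of it changes, which is exactly what the REG_DEAD
   handling in distribute_notes needs to know.  */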
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;

	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;

	case REG_ARGS_SIZE:
	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
	     entire adjustment.  Assert i3 contains at least some adjust.  */
	  if (!noop_move_p (i3))
	    {
	      int old_size, args_size = INTVAL (XEXP (note, 0));
	      /* fixup_args_size_notes looks at REG_NORETURN note,
		 so ensure the note is placed there first.  */
	      if (CALL_P (i3))
		{
		  rtx *np;
		  for (np = &next_note; *np; np = &XEXP (*np, 1))
		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
		      {
			rtx n = *np;
			*np = XEXP (n, 1);
			XEXP (n, 1) = REG_NOTES (i3);
			REG_NOTES (i3) = n;
			break;
		      }
		}
	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
	      gcc_assert (old_size != args_size
			  || (CALL_P (i3)
			      && !ACCUMULATE_OUTGOING_ARGS
			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
	    }
	  break;

	case REG_NORETURN:
	case REG_SETJMP:
	case REG_TM:
	case REG_CALL_DECL:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;

	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for registers which were both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }
	  break;

	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;

	case REG_INC:
	  /* These notes say something about how a register is used.  They must
	     be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;

	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }
	  break;

	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;

	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */

	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem_insn = from_insn;
	  else
	    {
	      tem_insn = i3;

	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;

	      /* If the new I2 sets the same register that is marked dead
		 in the note, we do not know where to put the note.
		 Give up.  */
	      if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
		break;
	    }

	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem_insn = PREV_INSN (tem_insn); place == 0;
		   tem_insn = PREV_INSN (tem_insn))
		{
		  if (!NONDEBUG_INSN_P (tem_insn))
		    {
		      if (tem_insn == BB_HEAD (bb))
			break;
		      continue;
		    }

		  /* If the register is being set at TEM_INSN, see if that is all
		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
		    {
		      rtx set = single_set (tem_insn);
		      rtx inner_dest = 0;
		      rtx_insn *cc0_setter = NULL;

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 CC0 targets must be careful to maintain setter/user
			 pairs.  If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest)
			  && (!HAVE_cc0
			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
			{
			  /* Move the notes and links of TEM_INSN elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem_insn);

			  PATTERN (tem_insn) = pc_rtx;
			  REG_NOTES (tem_insn) = NULL;

			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem_insn));

			  unsigned int regno = REGNO (XEXP (note, 0));
			  reg_stat_type *rsp = &reg_stat[regno];
			  if (rsp->last_set == tem_insn)
			    record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

			  SET_INSN_DELETED (tem_insn);
			  if (tem_insn == i2)
			    i2 = NULL;

			  /* Delete the setter too.  */
			  if (cc0_setter)
			    {
			      PATTERN (cc0_setter) = pc_rtx;
			      old_notes = REG_NOTES (cc0_setter);
			      REG_NOTES (cc0_setter) = NULL;

			      distribute_notes (old_notes, cc0_setter,
						cc0_setter, NULL,
						NULL_RTX, NULL_RTX, NULL_RTX);
			      distribute_links (LOG_LINKS (cc0_setter));

			      SET_INSN_DELETED (cc0_setter);
			      if (cc0_setter == i2)
				i2 = NULL;
			    }
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem_insn, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem_insn;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
			   || (CALL_P (tem_insn)
			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
		    {
		      place = tem_insn;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  struct insn_link *links = LOG_LINKS (place);
			  LOG_LINKS (place) = NULL;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem_insn == BB_HEAD (bb))
		    break;
		}
	    }

	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not if is it totally replaced,
	     which is what `dead_or_set_p' checks, so also check for it being
	     set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = &reg_stat[regno];

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.]  */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;

	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */

	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
		{
		  unsigned int endregno = END_REGNO (XEXP (note, 0));
		  bool all_used = true;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, PATTERN (place))
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      {
			all_used = false;
			break;
		      }

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs[i][reg_raw_mode[i]])
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, PATTERN (place))
				   && ! find_regno_fusage (place, USE, i))
			    for (tem_insn = PREV_INSN (place); ;
				 tem_insn = PREV_INSN (tem_insn))
			      {
				if (!NONDEBUG_INSN_P (tem_insn))
				  {
				    if (tem_insn == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem_insn, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem_insn)))
				  {
				    add_reg_note (tem_insn, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;

	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;
	}

      if (place2)
	add_shallow_copy_of_reg_note (place2, note);
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
	continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
	set = pat;
      else if (GET_CODE (pat) == PARALLEL)
	{
	  int i;
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      set = XVECEXP (pat, 0, i);
	      if (GET_CODE (set) != SET)
		continue;

	      reg = SET_DEST (set);
	      while (GET_CODE (reg) == ZERO_EXTRACT
		     || GET_CODE (reg) == STRICT_LOW_PART
		     || GET_CODE (reg) == SUBREG)
		reg = XEXP (reg, 0);

	      if (!REG_P (reg))
		continue;

	      if (REGNO (reg) == link->regno)
		break;
	    }
	  if (i == XVECLEN (pat, 0))
	    continue;
	}
      else
	continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART
	     || GET_CODE (reg) == SUBREG)
	reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  struct insn_link *link2;

	  FOR_EACH_LOG_LINK (link2, place)
	    if (link2->insn == link->insn && link2->regno == link->regno)
	      break;

	  if (link2 == NULL)
	    {
	      link->next = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
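/* Illustrative example: if I2 "(set (reg 100) ...)" and its only user I3
   are combined so that I3 now computes everything itself, the old link
   on I3 pointing at I2 is redistributed here: starting just after the
   link's target, the first following insn that still uses (reg 100)
   receives the link, and the search stops early if a new set of
   (reg 100) or the end of the basic block is reached first.  */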
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
	  && !reg_mentioned_p (x, expr))
	return true;
    }
  return false;
}
void
14730 dump_combine_stats (FILE *file
)
14734 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14735 combine_attempts
, combine_merges
, combine_extras
, combine_successes
);
14739 dump_combine_total_stats (FILE *file
)
14743 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14744 total_attempts
, total_merges
, total_extras
, total_successes
);
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
	free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}