/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.

   LOG_LINKS does not have links for uses of CC0.  They are not
   needed, because the insn that sets CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
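
/* For illustration (the insns and register numbers here are hypothetical),
   the canonical two-insn case combines

	(set (reg 100) (plus (reg 101) (const_int 4)))
	(set (reg 102) (mem (reg 100)))

   into the single insn

	(set (reg 102) (mem (plus (reg 101) (const_int 4))))

   provided (reg 100) dies in the second insn and the result matches an
   insn pattern in the machine description.  */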
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"
#include "function-abi.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn			*last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn			*last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				other expression

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx				last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int				last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int				last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT	last_set_nonzero_bits;
  char				last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode)	last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char				last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     one.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char			sign_bit_copies;

  unsigned HOST_WIDE_INT	nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int				truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;
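
/* As a concrete instance of the redundancy noted above (the numbers are
   illustrative): on a machine where byte loads zero extend, a QImode load
   leaves nonzero_bits == 0xff for the loaded register, so a later
   (and (reg) (const_int 255)) applied to that register is a no-op and can
   be removed.  */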
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;
/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;
/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;
/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;
/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;
/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;
/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;
/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link
{
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}
#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
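
/* For example (a minimal usage sketch, where `insn' and `regno' stand for
   whatever insn and register number are at hand), scanning all insns that
   feed INSN looks like:

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, insn)
       if (link->regno == regno)
	 break;
   */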
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;
/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;
/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;
/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};
/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *,
			      rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& !read_modify_subreg_p (SET_DEST (x))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
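
/* A typical use of SUBST (a sketch only; `new_src' and `new_notes' stand
   for whatever replacement expression and note list the caller has
   computed) pairs the substitution with a recognition attempt and backs
   everything out via undo_all on failure:

     SUBST (SET_SRC (PATTERN (insn)), new_src);
     if (recog_for_combine (&PATTERN (insn), insn, &new_notes) < 0)
       undo_all ();
   */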
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;

  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
				  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
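
/* With dumping enabled, the cost check above logs lines such as the
   following (the insn UIDs and costs here are made up for illustration):

     allowing combination of insns 12 and 16
     original costs 4 + 8 = 12
     replacement cost 8
   */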
/* Delete any insns that copy a register to itself.
   Return true if the CFG was changed.  */

static bool
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  bool edges_deleted = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      edges_deleted |= delete_insn_and_edges (insn);
	    }
	}
    }

  return edges_deleted;
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}
/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use, *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
            {
              unsigned int regno = DF_REF_REGNO (def);
              rtx_insn *use_insn;

              if (!next_use[regno])
                continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
            }

          FOR_EACH_INSN_USE (use, insn)
            if (can_combine_use_p (use))
              next_use[DF_REF_REGNO (use)] = insn;
        }
    }

  free (next_use);
}

/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the CFG was changed (e.g. if the combiner has
   turned an indirect jump instruction into a direct jump).  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
        if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
            rtx links;

            subst_low_luid = DF_INSN_LUID (insn);
            subst_insn = insn;

	    note_stores (insn, set_nonzero_bits_and_sign_copies, insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = param_max_combine_insns;

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
	 be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
	continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already have been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun,
						"insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();

  new_direct_jump_p |= purge_all_dead_edges ();
  new_direct_jump_p |= delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}

/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
         function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local
	= cgraph_node::local_info_node (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
         (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
         TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
         see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed;  */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
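
/* For instance (the modes here are only illustrative): if an int argument
   arrives in a DImode register already sign-extended from SImode, the value
   recorded above is

	(sign_extend:DI (clobber:SI (const_int 0)))

   which tells later simplifications that the upper bits are copies of the
   sign bit, so a redundant extension at the head of the function can be
   eliminated.  */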
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
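
/* Worked example (numbers only for illustration): with PREC == 32 and
   MODE == HImode, (const_int 0x8000) has the 16-bit sign bit set, so it is
   rewritten as 0x8000 | ~0xffff, i.e. (const_int -32768), matching what a
   sign-extending load would produce.  */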
1681 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1685 update_rsp_from_reg_equal (reg_stat_type
*rsp
, rtx_insn
*insn
, const_rtx set
,
1688 rtx reg_equal_note
= insn
? find_reg_equal_equiv_note (insn
) : NULL_RTX
;
1689 unsigned HOST_WIDE_INT bits
= 0;
1690 rtx reg_equal
= NULL
, src
= SET_SRC (set
);
1691 unsigned int num
= 0;
1694 reg_equal
= XEXP (reg_equal_note
, 0);
1696 if (SHORT_IMMEDIATES_SIGN_EXTEND
)
1698 src
= sign_extend_short_imm (src
, GET_MODE (x
), BITS_PER_WORD
);
1700 reg_equal
= sign_extend_short_imm (reg_equal
, GET_MODE (x
), BITS_PER_WORD
);
1703 /* Don't call nonzero_bits if it cannot change anything. */
1704 if (rsp
->nonzero_bits
!= HOST_WIDE_INT_M1U
)
1706 machine_mode mode
= GET_MODE (x
);
1707 if (GET_MODE_CLASS (mode
) == MODE_INT
1708 && HWI_COMPUTABLE_MODE_P (mode
))
1709 mode
= nonzero_bits_mode
;
1710 bits
= nonzero_bits (src
, mode
);
1711 if (reg_equal
&& bits
)
1712 bits
&= nonzero_bits (reg_equal
, mode
);
1713 rsp
->nonzero_bits
|= bits
;
1716 /* Don't call num_sign_bit_copies if it cannot change anything. */
1717 if (rsp
->sign_bit_copies
!= 1)
1719 num
= num_sign_bit_copies (SET_SRC (set
), GET_MODE (x
));
1720 if (reg_equal
&& maybe_ne (num
, GET_MODE_PRECISION (GET_MODE (x
))))
1722 unsigned int numeq
= num_sign_bit_copies (reg_equal
, GET_MODE (x
));
1723 if (num
== 0 || numeq
> num
)
1726 if (rsp
->sign_bit_copies
== 0 || num
< rsp
->sign_bit_copies
)
1727 rsp
->sign_bit_copies
= num
;
1731 /* Called via note_stores. If X is a pseudo that is narrower than
1732 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1734 If we are setting only a portion of X and we can't figure out what
1735 portion, assume all bits will be used since we don't know what will
1738 Similarly, set how many bits of X are known to be copies of the sign bit
1739 at all locations in the function. This is the smallest number implied
1743 set_nonzero_bits_and_sign_copies (rtx x
, const_rtx set
, void *data
)
1745 rtx_insn
*insn
= (rtx_insn
*) data
;
1746 scalar_int_mode mode
;
1749 && REGNO (x
) >= FIRST_PSEUDO_REGISTER
1750 /* If this register is undefined at the start of the file, we can't
1751 say what its contents were. */
1752 && ! REGNO_REG_SET_P
1753 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
), REGNO (x
))
1754 && is_a
<scalar_int_mode
> (GET_MODE (x
), &mode
)
1755 && HWI_COMPUTABLE_MODE_P (mode
))
1757 reg_stat_type
*rsp
= ®_stat
[REGNO (x
)];
1759 if (set
== 0 || GET_CODE (set
) == CLOBBER
)
1761 rsp
->nonzero_bits
= GET_MODE_MASK (mode
);
1762 rsp
->sign_bit_copies
= 1;
1766 /* If this register is being initialized using itself, and the
1767 register is uninitialized in this basic block, and there are
1768 no LOG_LINKS which set the register, then part of the
1769 register is uninitialized. In that case we can't assume
1770 anything about the number of nonzero bits.
1772 ??? We could do better if we checked this in
1773 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1774 could avoid making assumptions about the insn which initially
1775 sets the register, while still using the information in other
1776 insns. We would have to be careful to check every insn
1777 involved in the combination. */
1780 && reg_referenced_p (x
, PATTERN (insn
))
1781 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn
)),
1784 struct insn_link
*link
;
1786 FOR_EACH_LOG_LINK (link
, insn
)
1787 if (dead_or_set_p (link
->insn
, x
))
1791 rsp
->nonzero_bits
= GET_MODE_MASK (mode
);
1792 rsp
->sign_bit_copies
= 1;
1797 /* If this is a complex assignment, see if we can convert it into a
1798 simple assignment. */
1799 set
= expand_field_assignment (set
);
1801 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1802 set what we know about X. */
1804 if (SET_DEST (set
) == x
1805 || (paradoxical_subreg_p (SET_DEST (set
))
1806 && SUBREG_REG (SET_DEST (set
)) == x
))
1807 update_rsp_from_reg_equal (rsp
, insn
, set
, x
);
1810 rsp
->nonzero_bits
= GET_MODE_MASK (mode
);
1811 rsp
->sign_bit_copies
= 1;
1816 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1817 optionally insns that were previously combined into I3 or that will be
1818 combined into the merger of INSN and I3. The order is PRED, PRED2,
1819 INSN, SUCC, SUCC2, I3.
1821 Return 0 if the combination is not allowed for any reason.
1823 If the combination is allowed, *PDEST will be set to the single
1824 destination of INSN and *PSRC to the single source, and this function
1828 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1829 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1830 rtx
*pdest
, rtx
*psrc
)
1837 bool all_adjacent
= true;
1838 int (*is_volatile_p
) (const_rtx
);
1844 if (next_active_insn (succ2
) != i3
)
1845 all_adjacent
= false;
1846 if (next_active_insn (succ
) != succ2
)
1847 all_adjacent
= false;
1849 else if (next_active_insn (succ
) != i3
)
1850 all_adjacent
= false;
1851 if (next_active_insn (insn
) != succ
)
1852 all_adjacent
= false;
1854 else if (next_active_insn (insn
) != i3
)
1855 all_adjacent
= false;
1857 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1858 or a PARALLEL consisting of such a SET and CLOBBERs.
1860 If INSN has CLOBBER parallel parts, ignore them for our processing.
1861 By definition, these happen during the execution of the insn. When it
1862 is merged with another insn, all bets are off. If they are, in fact,
1863 needed and aren't also supplied in I3, they may be added by
1864 recog_for_combine. Otherwise, it won't match.
1866 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1869 Get the source and destination of INSN. If more than one, can't
1872 if (GET_CODE (PATTERN (insn
)) == SET
)
1873 set
= PATTERN (insn
);
1874 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1875 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1877 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1879 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1881 switch (GET_CODE (elt
))
1883 /* This is important to combine floating point insns
1884 for the SH4 port. */
1886 /* Combining an isolated USE doesn't make sense.
1887 We depend here on combinable_i3pat to reject them. */
1888 /* The code below this loop only verifies that the inputs of
1889 the SET in INSN do not change. We call reg_set_between_p
1890 to verify that the REG in the USE does not change between
1892 If the USE in INSN was for a pseudo register, the matching
1893 insn pattern will likely match any register; combining this
1894 with any other USE would only be safe if we knew that the
1895 used registers have identical values, or if there was
1896 something to tell them apart, e.g. different modes. For
1897 now, we forgo such complicated tests and simply disallow
1898 combining of USES of pseudo registers with any other USE. */
1899 if (REG_P (XEXP (elt
, 0))
1900 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1902 rtx i3pat
= PATTERN (i3
);
1903 int i
= XVECLEN (i3pat
, 0) - 1;
1904 unsigned int regno
= REGNO (XEXP (elt
, 0));
1908 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1910 if (GET_CODE (i3elt
) == USE
1911 && REG_P (XEXP (i3elt
, 0))
1912 && (REGNO (XEXP (i3elt
, 0)) == regno
1913 ? reg_set_between_p (XEXP (elt
, 0),
1914 PREV_INSN (insn
), i3
)
1915 : regno
>= FIRST_PSEUDO_REGISTER
))
1922 /* We can ignore CLOBBERs. */
1927 /* Ignore SETs whose result isn't used but not those that
1928 have side-effects. */
1929 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1930 && insn_nothrow_p (insn
)
1931 && !side_effects_p (elt
))
1934 /* If we have already found a SET, this is a second one and
1935 so we cannot combine with this insn. */
1943 /* Anything else means we can't combine. */
1949 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1950 so don't do anything with it. */
1951 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
  /* The simplification in expand_field_assignment may call back to
     get_last_value, so set safe guard here.  */
  subst_low_luid = DF_INSN_LUID (insn);

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);

  /* Do not eliminate a user-specified register if it is in an asm input,
     because we may break the register asm usage defined in the GCC manual
     if we allow it.
     Be aware that this may cover more cases than we expect but this
     should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;
  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
         a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
          && (find_reg_fusage (i3, USE, dest)
              || (REG_P (dest)
                  && REGNO (dest) < FIRST_PSEUDO_REGISTER
                  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
         after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
          && ((succ2
               && (reg_used_between_p (dest, succ2, i3)
                   || reg_used_between_p (dest, succ, succ2)))
              || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
              || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
              || (succ
                  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
                     that case SUCC is not in the insn stream, so use SUCC2
                     instead for this test.  */
                  && reg_used_between_p (dest, insn,
                                         succ2
                                         && INSN_UID (succ) == INSN_UID (succ2)
                                         ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
         does not use any registers whose values alter in between.  However,
         if the insns are adjacent, a use can't cross a set even though we
         think it might (this can happen for a sequence of insns each setting
         the same destination; last_set of that register might point to
         a NOTE).  If INSN has a REG_EQUIV note, the register is always
         equivalent to the memory so the substitution is valid even if there
         are intervening stores.  Also, don't move a volatile asm or
         UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
          && (((!MEM_P (src)
                || ! find_reg_note (insn, REG_EQUIV, src))
               && modified_between_p (src, insn, i3))
              || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
              || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
         change whether the life span of some REGs crosses calls or not,
         and it is a pain to update that information.
         Exception: if source is a constant, moving it later can't hurt.
         Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
         cases except for parameters, it is possible to have a register copy
         insn referencing a hard register that is not allowed to contain the
         mode being copied and which would not be valid as an operand of most
         insns.  Eliminate this problem by not combining with such an insn.

         Also, on some machines we don't want to extend the life of a hard
         register.  */

      if (REG_P (src)
          && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
               && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
              /* Don't extend the life of a hard register unless it is
                 user variable (if we have few registers) or it can't
                 fit into the desired register (meaning something special
                 is going on).
                 Also avoid substituting a return register into I3, because
                 reload can't handle a conflict with constraints of other
                 inputs.  */
              || (REGNO (src) < FIRST_PSEUDO_REGISTER
                  && !targetm.hard_regno_mode_ok (REGNO (src),
                                                  GET_MODE (src)))))
        return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;
  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
        {
          rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

          /* If the clobber represents an earlyclobber operand, we must not
             substitute an expression containing the clobbered register.
             As we do not analyze the constraint strings here, we have to
             make the conservative assumption.  However, if the register is
             a fixed hard reg, the clobber cannot represent any operand;
             we leave it up to the machine description to either accept or
             reject use-and-clobber patterns.  */
          if (!REG_P (reg)
              || REGNO (reg) >= FIRST_PSEUDO_REGISTER
              || !fixed_regs[REGNO (reg)])
            if (reg_overlap_mentioned_p (reg, src))
              return 0;
        }
  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
        return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
        return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;
  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
                  ? volatile_refs_p
                  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;
  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
          && (JUMP_P (i3)
              || reg_used_between_p (XEXP (link, 0), insn, i3)
              || (pred != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
              || (pred2 != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
              || (succ != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
              || (succ2 != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
              || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
        return 0;
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
          && ! all_adjacent)
        return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

         (set (reg:DI 101) (reg:DI 100))
         (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

         (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
                    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */

static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
                  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;
  if (GET_CODE (x) == SET)
    {
      rtx set = x;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
             || GET_CODE (inner_dest) == SUBREG
             || GET_CODE (inner_dest) == ZERO_EXTRACT)
        inner_dest = XEXP (inner_dest, 0);

      /* Check for the case where I3 modifies its output, as discussed
         above.  We don't want to prevent pseudos from being combined
         into the address of a MEM, so only prevent the combination if
         i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
           (!MEM_P (inner_dest)
            || rtx_equal_p (i2dest, inner_dest)
            || (i1dest && rtx_equal_p (i1dest, inner_dest))
            || (i0dest && rtx_equal_p (i0dest, inner_dest)))
           && (reg_overlap_mentioned_p (i2dest, inner_dest)
               || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
               || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

          /* This is the same test done in can_combine_p except we can't test
             all_adjacent; we don't have to, since this instruction will stay
             in place, thus we are not considering increasing the lifetime of
             INNER_DEST.

             Also, if this insn sets a function argument, combining it with
             something that might need a spill could clobber a previous
             function argument; the all_adjacent test in can_combine_p also
             checks this; here, we do a more specific test for this case.  */

          || (REG_P (inner_dest)
              && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
              && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
                                              GET_MODE (inner_dest)))
          || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
          || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
        return 0;

      /* If DEST is used in I3, it is being killed in this insn, so
         record that for later.  We have to consider paradoxical
         subregs here, since they kill the whole register, but we
         ignore partial subregs, STRICT_LOW_PART, etc.
         Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
         STACK_POINTER_REGNUM, since these are always considered to be
         live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
        subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
          && REG_P (subdest)
          && reg_referenced_p (subdest, PATTERN (i3))
          && REGNO (subdest) != FRAME_POINTER_REGNUM
          && (HARD_FRAME_POINTER_IS_FRAME_POINTER
              || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
          && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
              || (REGNO (subdest) != ARG_POINTER_REGNUM
                  || ! fixed_regs[REGNO (subdest)]))
          && REGNO (subdest) != STACK_POINTER_REGNUM)
        {
          if (*pi3dest_killed)
            return 0;

          *pi3dest_killed = subdest;
        }
    }
  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
        if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
                                i1_not_in_src, i0_not_in_src, pi3dest_killed))
          return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   or division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
                && pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
        return contains_muldiv (XEXP (x, 0))
               || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
        return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
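
/* Two illustrative inputs (hypothetical RTL, not from the original source):
   (plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102)) contains a
   genuine multiplication, so contains_muldiv returns 1; by contrast
   (mult:SI (reg:SI 100) (const_int 8)) multiplies by a power of two,
   which is really a shift, so it is not counted and the result is 0.  */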
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (!NONDEBUG_INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
           && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
#ifdef LEAF_REGISTERS
           && ! LEAF_REGISTERS[REGNO (src)])
#else
           )
#endif
          || (HARD_REGISTER_P (dest)
              && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
              && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
    return 1;

  return 0;
}
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
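
/* A worked illustration of the mask arithmetic above (hypothetical numbers,
   not from the original source): for a return value in hard regs 0..3 we
   have info->regno == 0, info->nregs == 4 and info->mask == 0xf.  A full
   store covering regs 2..3 gives regno == 2 and nregs == 2, so
   new_mask == (2U << 1) - 1 == 3, shifted left by regno - info->regno == 2
   into 0xc; clearing those bits leaves info->mask == 0x3, i.e. only regs
   0..1 are still live parts of the return value.  */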
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (p, likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs --;
  do
    {
      if ((mask & 1 << nregs)
          && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
        return 1;
    } while (nregs--);
  return 0;
}
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART
         || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  /* Don't change between modes with different underlying register sizes,
     since this could lead to invalid subregs.  */
  if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
                REGMODE_NATURAL_SIZE (GET_MODE (x))))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (targetm.hard_regno_mode_ok (regno, mode)
            && REG_NREGS (x) >= hard_regno_nregs (regno, mode));

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
          && REG_N_SETS (regno) == 1
          && !added_sets
          && !REG_USERVAR_P (x));
}
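
/* For example (a hypothetical case, not from the original source): a pseudo
   (reg:SI 100) that is set exactly once, is not a user variable, and whose
   original SET is not kept around (ADDED_SETS is 0) may be given a new mode
   here, provided REGMODE_NATURAL_SIZE agrees for both modes; a hard register
   additionally needs targetm.hard_regno_mode_ok and must not come to occupy
   more registers than it did before.  */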
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
         && SUBREG_REG (x) == reg
         && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
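
/* For example (hypothetical RTL): with REG being (reg:DI 100), a SET
   destination of (subreg:SI (reg:DI 100) 0), possibly wrapped in a
   STRICT_LOW_PART or ZERO_EXTRACT, refers to part of the register and
   yields true; (reg:DI 100) itself is not a subword and yields false.  */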
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
        if (BARRIER_P (insn))
          {
            if (PREV_INSN (insn))
              SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
            else
              BB_FOOTER (bb) = NEXT_INSN (insn);
            if (NEXT_INSN (insn))
              SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
          }
        else if (LABEL_P (insn))
          break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
        || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    switch (GET_CODE (XVECEXP (pat, 0, i)))
      {
      case CLOBBER:
        if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
          return false;
        break;
      default:
        return false;
      }
  return true;
}
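
/* For illustration (hypothetical RTL, not from the source): with N == 2,
     (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
                (set (reg:CC 17) (compare:CC (reg:SI 101) (reg:SI 102)))
                (clobber (scratch:SI))])
   satisfies the predicate, whereas a trailing (clobber (const_int 0)),
   combine's marker for a failed substitution, makes it fail.  */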
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
        return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
        if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
          return false;
    }

  return true;
}
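
/* For example (hypothetical RTL):
     (parallel [(set (reg:SI 100) (reg:SI 101))
                (set (reg:SI 102) (reg:SI 100))])
   cannot be split in order, because the second SET reads reg 100, which
   the first SET just changed; if the two SETs touched disjoint registers,
   the split would be allowed.  */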
/* Return whether X is just a single set, with the source
   a general_operand.  */
static bool
is_just_move (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);

  return (GET_CODE (x) == SET && general_operand (SET_SRC (x), VOIDmode));
}
/* Callback function to count autoincs.  */

static int
count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
{
  (*((int *) arg))++;

  return 0;
}
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that has
   been passed as I3 to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
             int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int split_i2i3 = 0;
  int changed_i3_dest = 0;
  bool i2_was_move = false, i3_was_move = false;
  int n_auto_inc = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;
  scalar_int_mode dest_mode, temp_mode;
  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;
  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
        return 0;

      for (i = 0; i < 4; i++)
        {
          rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
          rtx set = single_set (insn);
          rtx src;
          if (!set)
            continue;
          src = SET_SRC (set);
          if (CONSTANT_P (src))
            {
              ngood += 2;
              break;
            }
          else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
            ngood++;
          else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
                   || GET_CODE (src) == LSHIFTRT)
            nshift++;
        }

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
         are likely manipulating its value.  Ideally we'll be able to combine
         all four insns into a bitfield insertion of some kind.

         Note the source in I0 might be inside a sign/zero extension and the
         memory modes in I0 and I3 might be different.  So extract the address
         from the destination of I3 and search for it in the source of I0.

         In the event that there's a match but the source/dest do not actually
         refer to the same memory, the worst that happens is we try some
         combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
          /* Ensure the source of SET0 is a MEM, possibly buried inside
             an extension.  */
          && (GET_CODE (SET_SRC (set0)) == MEM
              || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
                   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
                  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
          && (set3 = single_set (i3))
          /* Ensure the destination of SET3 is a MEM.  */
          && GET_CODE (SET_DEST (set3)) == MEM
          /* Would it be better to extract the base address for the MEM
             in SET3 and look for that?  I don't have cases where it matters
             but I could envision such cases.  */
          && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
        ngood += 2;

      if (ngood < 2 && nshift < 2)
        return 0;
    }
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
        fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
                 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
        fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
                 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
        fprintf (dump_file, "\nTrying %d -> %d:\n",
                 INSN_UID (i2), INSN_UID (i3));

      if (i0)
        dump_insn_slim (dump_file, i0);
      if (i1)
        dump_insn_slim (dump_file, i1);
      dump_insn_slim (dump_file, i2);
      dump_insn_slim (dump_file, i3);
    }
  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  added_notes_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
         below would need to check what is inside (and reg_overlap_mentioned_p
         doesn't support those codes anyway).  Don't allow those destinations;
         the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
                                    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
         which we are going to substitute into one output of I2,
         is not used within another output of I2.  We must avoid making this:
         (parallel [(set (mem (reg 69)) ...)
                    (set (reg 69) ...)])
         which is not well-defined as to order of actions.
         (Besides, reload can't handle output reloads for this.)

         The problem can also happen if the dest of I3 is a memory ref,
         if another dest in I2 is an indirect memory ref.

         Neither can this PARALLEL be an asm.  We do not allow combining
         that usually (see can_combine_p), so do not here either.  */
      bool ok = true;
      for (i = 0; ok && i < XVECLEN (p2, 0); i++)
        {
          if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
               || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
              && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
                                          SET_DEST (XVECEXP (p2, 0, i))))
            ok = false;
          else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
                   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
            ok = false;
        }

      if (ok)
        for (i = 0; i < XVECLEN (p2, 0); i++)
          if (GET_CODE (XVECEXP (p2, 0, i)) == SET
              && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
            {
              subst_insn = i3;
              subst_low_luid = DF_INSN_LUID (i2);

              added_sets_2 = added_sets_1 = added_sets_0 = 0;
              i2src = SET_SRC (XVECEXP (p2, 0, i));
              i2dest = SET_DEST (XVECEXP (p2, 0, i));
              i2dest_killed = dead_or_set_p (i2, i2dest);

              /* Replace the dest in I2 with our dest and make the resulting
                 insn the new pattern for I3.  Then skip to where we validate
                 the pattern.  Everything was set up above.  */
              SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
              newpat = p2;
              i3_subst_into_i2 = 1;
              goto validate_replacement;
            }
    }
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      rtx temp_dest = SET_DEST (temp_expr);
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        {
          if (CONST_INT_P (XEXP (dest, 1))
              && CONST_INT_P (XEXP (dest, 2))
              && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
                                         &dest_mode))
            {
              width = INTVAL (XEXP (dest, 1));
              offset = INTVAL (XEXP (dest, 2));
              dest = XEXP (dest, 0);
              if (BITS_BIG_ENDIAN)
                offset = GET_MODE_PRECISION (dest_mode) - width - offset;
            }
        }
      else
        {
          if (GET_CODE (dest) == STRICT_LOW_PART)
            dest = XEXP (dest, 0);
          if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
            {
              width = GET_MODE_PRECISION (dest_mode);
              offset = 0;
            }
        }

      if (offset >= 0)
        {
          /* If this is the low part, we're done.  */
          if (subreg_lowpart_p (dest))
            ;
          /* Handle the case where inner is twice the size of outer.  */
          else if (GET_MODE_PRECISION (temp_mode)
                   == 2 * GET_MODE_PRECISION (dest_mode))
            offset += GET_MODE_PRECISION (dest_mode);
          /* Otherwise give up for now.  */
          else
            offset = -1;
        }

      if (offset >= 0)
        {
          rtx inner = SET_SRC (PATTERN (i3));
          rtx outer = SET_SRC (temp_expr);

          wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
                                   rtx_mode_t (inner, dest_mode),
                                   offset, width);

          subst_insn = i3;
          subst_low_luid = DF_INSN_LUID (i2);
          added_sets_2 = added_sets_1 = added_sets_0 = 0;
          i2dest = temp_dest;
          i2dest_killed = dead_or_set_p (i2, i2dest);

          /* Replace the source in I2 with the new constant and make the
             resulting insn the new pattern for I3.  Then skip to where we
             validate the pattern.  Everything was set up above.  */
          SUBST (SET_SRC (temp_expr),
                 immed_wide_int_const (o, temp_mode));

          newpat = PATTERN (i2);

          /* The dest of I3 has been replaced with the dest of I2.  */
          changed_i3_dest = 1;
          goto validate_replacement;
        }
    }
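
  /* A worked illustration of the constant merge above (hypothetical insns,
     little-endian): if I2 is (set (reg:DI 100) (const_int 0)) and I3 is
     (set (subreg:SI (reg:DI 100) 0) (const_int 77)), then DEST_MODE is
     SImode, OFFSET is 0 and WIDTH is 32, so wi::insert folds the two
     constants to 77 and I3 becomes (set (reg:DI 100) (const_int 77)),
     making I2 redundant.  */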
  /* If we have no I1 and I2 looks like:
        (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
                   (set Y OP)])
     make up a dummy I1 that is
        (set Y OP)
     and change I2 to be
        (set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
          == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
                      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
         the same DF_INSN_LUID for value tracking.  Our fake I1 will
         never appear in the insn stream so giving it the same INSN_UID
         as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
                         XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
                         -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
             SET_DEST (PATTERN (i1)));

      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
                  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }
  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
      && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
         the same DF_INSN_LUID for value tracking.  Our fake I1 will
         never appear in the insn stream so giving it the same INSN_UID
         as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
                         XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
                         -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
  /* Verify that I2 and maybe I1 and I0 can be combined into I3.  */
  if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Can't combine i2 into i3\n");
      undo_all ();
      return 0;
    }
  if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Can't combine i1 into i3\n");
      undo_all ();
      return 0;
    }
  if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Can't combine i0 into i3\n");
      undo_all ();
      return 0;
    }
3106 i2_was_move
= is_just_move (i2
);
3107 i3_was_move
= is_just_move (i3
);
3109 /* Record whether I2DEST is used in I2SRC and similarly for the other
3110 cases. Knowing this will help in register status updating below. */
3111 i2dest_in_i2src
= reg_overlap_mentioned_p (i2dest
, i2src
);
3112 i1dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i1dest
, i1src
);
3113 i2dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i2dest
, i1src
);
3114 i0dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i0dest
, i0src
);
3115 i1dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i1dest
, i0src
);
3116 i2dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i2dest
, i0src
);
3117 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
3118 i1dest_killed
= i1
&& dead_or_set_p (i1
, i1dest
);
3119 i0dest_killed
= i0
&& dead_or_set_p (i0
, i0dest
);
  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
                          : (!reg_overlap_mentioned_p (i1dest, i0dest)
                             && reg_overlap_mentioned_p (i0dest, i2src))));
  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
                          i1 && i2dest_in_i1src && !i1_feeds_i2_n,
                          i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
                                 || (i1dest_in_i0src && !i0_feeds_i1_n)),
                          &i3dest_killed))
    {
      undo_all ();
      return 0;
    }
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
          && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;
  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like

        mov r3,(r3)+

     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

  if (AUTO_INC_DEC)
    {
      rtx link;

      if (!(GET_CODE (PATTERN (i3)) == SET
            && REG_P (SET_SRC (PATTERN (i3)))
            && MEM_P (SET_DEST (PATTERN (i3)))
            && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
                || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
        /* It's not the exception.  */
        for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
          if (REG_NOTE_KIND (link) == REG_INC
              && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
                  || (i1 != 0
                      && reg_overlap_mentioned_p (XEXP (link, 0),
                                                  PATTERN (i1)))))
            {
              undo_all ();
              return 0;
            }
    }
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
                     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
                     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
                     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
                         && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;
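
  /* A hypothetical illustration (not from the original source): suppose I1
     sets reg 100, I2 sets reg 101 without using reg 100, and I3 uses both,
     with reg 100 also used after I3.  Then I1 and I2 feed I3 independently;
     reg 100 neither dies nor is set in I3, so added_sets_1 is nonzero and
     the SET from I1 must be kept in the merged pattern.  */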
  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
          || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
          || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }
  /* Count how many auto_inc expressions there were in the original insns;
     we need to have the same number in the resulting patterns.  */

  if (i0)
    for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
  if (i1)
    for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
  for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
  for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
        i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
        i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
        i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
        i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
        i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
        i0pat = copy_rtx (PATTERN (i0));
    }
  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;
  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
      scalar_int_mode mode;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
          && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
                                            &cc_use_insn)))
        {
          compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
          if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
            compare_code = simplify_compare_const (compare_code, mode,
                                                   &op0, &op1);
          target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
        }

      /* Do the rest only if op1 is const0_rtx, which may be the
         result of simplification.  */
      if (op1 == const0_rtx)
        {
          /* If a single use of the CC is found, prepare to modify it
             when SELECT_CC_MODE returns a new CC-class mode, or when
             the above simplify_compare_const() returned a new comparison
             operator.  undobuf.other_insn is assigned the CC use insn
             when modifying it.  */
          if (cc_use_loc)
            {
#ifdef SELECT_CC_MODE
              machine_mode new_mode
                = SELECT_CC_MODE (compare_code, op0, op1);
              if (new_mode != orig_compare_mode
                  && can_change_dest_mode (SET_DEST (newpat),
                                           added_sets_2, new_mode))
                {
                  unsigned int regno = REGNO (newpat_dest);
                  compare_mode = new_mode;
                  if (regno < FIRST_PSEUDO_REGISTER)
                    newpat_dest = gen_rtx_REG (compare_mode, regno);
                  else
                    {
                      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
                      newpat_dest = regno_reg_rtx[regno];
                    }
                }
#endif
              /* Cases for modifying the CC-using comparison.  */
              if (compare_code != orig_compare_code
                  /* ??? Do we need to verify the zero rtx?  */
                  && XEXP (*cc_use_loc, 1) == const0_rtx)
                {
                  /* Replace cc_use_loc with entire new RTX.  */
                  SUBST (*cc_use_loc,
                         gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
                                         newpat_dest, const0_rtx));
                  undobuf.other_insn = cc_use_insn;
                }
              else if (compare_mode != orig_compare_mode)
                {
                  /* Just replace the CC reg with a new mode.  */
                  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
                  undobuf.other_insn = cc_use_insn;
                }
            }

          /* Now we modify the current newpat:
             First, SET_DEST(newpat) is updated if the CC mode has been
             altered.  For targets without SELECT_CC_MODE, this should be
             optimized away.  */
          if (compare_mode != orig_compare_mode)
            SUBST (SET_DEST (newpat), newpat_dest);
          /* This is always done to propagate i2src into newpat.  */
          SUBST (SET_SRC (newpat),
                 gen_rtx_COMPARE (compare_mode, op0, op1));
          /* Create new version of i2pat if needed; the below PARALLEL
             creation needs this to work correctly.  */
          if (! rtx_equal_p (i2src, op0))
            i2pat = gen_rtx_SET (i2dest, op0);
          i2_is_used = 1;
        }
    }
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
         an unneeded operation, such as a ZERO_EXTEND of something
         that is known to have the high part zero.  Handle that case
         by letting subst look at the inner insns.

         Another way to do this would be to have a function that tries
         to simplify a single insn instead of merging two or more
         insns.  We don't do this because of the potential of infinite
         loops and because of the potential extra memory required.
         However, doing it the way we are is a bit of a kludge and
         doesn't catch all cases.

         But only do this if -fexpensive-optimizations since it slows
         things down and doesn't usually win.

         This is not done in the COMPARE case above because the
         unmodified I2PAT is used in the PARALLEL and so a pattern
         with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
        {
          /* Pass pc_rtx so no substitutions are done, just
             simplifications.  */
          if (i1)
            {
              subst_low_luid = DF_INSN_LUID (i1);
              i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
            }

          subst_low_luid = DF_INSN_LUID (i2);
          i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
        }

      n_occurrences = 0;                /* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);

      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
         copy of I2SRC each time we substitute it, in order to avoid creating
         self-referential RTL when we will be substituting I1SRC for I1DEST
         later.  Likewise if I0 feeds into I2, either directly or indirectly
         through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
                      (i1_feeds_i2_n && i1dest_in_i1src)
                      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
                          && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Before we can do this substitution, we must redo the test done
         above (see detailed comments there) that ensures I1DEST isn't
         mentioned in any SETs in NEWPAT that are field assignments.  */
      if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
                             0, 0, 0))
        {
          undo_all ();
          return 0;
        }

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
         for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
        i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
         copy of I1SRC each time we substitute it, in order to avoid creating
         self-referential RTL when we will be substituting I0SRC for I0DEST
         later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
                      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
                             0, 0, 0))
        {
          undo_all ();
          return 0;
        }

      /* If the following substitution will modify I0SRC, make a copy of it
         for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
        i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
                           || (i0_feeds_i2_n)))
        i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }
  if (n_auto_inc)
    {
      int new_n_auto_inc = 0;
      for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);

      if (n_auto_inc != new_n_auto_inc)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Number of auto_inc expressions changed\n");
          undo_all ();
          return 0;
        }
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
          && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
              > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
          && (n_occurrences + added_sets_0
              + (added_sets_1 && i0_feeds_i1_n)
              + (added_sets_2 && i0_feeds_i2_n)
              > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
         at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
          && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;

      if (GET_CODE (newpat) == PARALLEL)
        {
          rtvec old = XVEC (newpat, 0);
          total_sets = XVECLEN (newpat, 0) + extra_sets;
          newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
          memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
                  sizeof (old->elem[0]) * old->num_elem);
        }
      else
        {
          rtx old = newpat;
          total_sets = 1 + extra_sets;
          newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
          XVECEXP (newpat, 0, 0) = old;
        }

      if (added_sets_0)
        XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
        {
          rtx t = i1pat;
          if (i0_feeds_i1_n)
            t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

          XVECEXP (newpat, 0, --total_sets) = t;
        }
      if (added_sets_2)
        {
          rtx t = i2pat;
          if (i1_feeds_i2_n)
            t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
                       i0_feeds_i1_n && i0dest_in_i0src);
          if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
            t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

          XVECEXP (newpat, 0, --total_sets) = t;
        }
    }
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
        RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }
  /* We have recognized nothing yet.  */
  insn_code_number = -1;

  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.

     If the remaining SET came from I2 its destination should not be used
     between I2 and I3.  See PR82024.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
            && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
           || (GET_CODE (SET_DEST (set1)) == SUBREG
               && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
          && insn_nothrow_p (i3)
          && !side_effects_p (SET_SRC (set1)))
        {
          newpat = set0;
          insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
        }

      else if (((REG_P (SET_DEST (set0))
                 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
                || (GET_CODE (SET_DEST (set0)) == SUBREG
                    && find_reg_note (i3, REG_UNUSED,
                                      SUBREG_REG (SET_DEST (set0)))))
               && insn_nothrow_p (i3)
               && !side_effects_p (SET_SRC (set0)))
        {
          rtx dest = SET_DEST (set1);
          if (GET_CODE (dest) == SUBREG)
            dest = SUBREG_REG (dest);
          if (!reg_used_between_p (dest, i2, i3))
            {
              newpat = set1;
              insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

              if (insn_code_number >= 0)
                changed_i3_dest = 1;
            }
        }

      if (insn_code_number < 0)
        newpat = oldpat;
    }
  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, *split;
      rtx_insn *m_split_insn;

      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
	 use I2DEST as a scratch register will help.  In the latter case,
	 convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split_insn = combine_split_insns (newpat, i3);

      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
	     (temporarily, until we are committed to this instruction
	     combination) does not work: for example, any call to nonzero_bits
	     on the register (from a splitter in the MD file, for example)
	     will get the old information, which is invalid.

	     Since nowadays we can create registers during combine just fine,
	     we should just create a new one here, not reuse i2dest.  */

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split_insn = combine_split_insns (parallel, i3);

	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split_insn == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split_insn = combine_split_insns (parallel, i3);

	      if (m_split_insn == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }

	  i2scratch = m_split_insn != 0;
	}
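      /* Schematically, the scratch-register attempt above wraps NEWPAT as

	     (parallel [...NEWPAT...
			(clobber (reg:SI 99))])

	 where (reg:SI 99) stands for I2DEST, so that an MD splitter in
	 need of a temporary register may use it.  */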
      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split_insn = combine_split_insns (parallel, i3);
	}

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	{
	  rtx m_split_pat = PATTERN (m_split_insn);
	  insn_code_number = recog_for_combine (&m_split_pat, i3,
						&new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split_pat;
	}
      else if (m_split_insn
	       && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
	       && (next_nonnote_nondebug_insn (i2) == i3
		   || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
	{
	  rtx i2set, i3set;
	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
	  newi2pat = PATTERN (m_split_insn);

	  i3set = single_set (NEXT_INSN (m_split_insn));
	  i2set = single_set (m_split_insn);

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* If I2 or I3 has multiple SETs, we won't know how to track
	     register status, so don't use these insns.  If I2's destination
	     is used between I2 and I3, we also can't use these insns.  */

	  if (i2_code_number >= 0 && i2set && i3set
	      && (next_nonnote_nondebug_insn (i2) == i3
		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
	    insn_code_number = recog_for_combine (&newi3pat, i3,
						  &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = newi3pat;

	  /* It is possible that both insns now set the destination of I3.
	     If so, we must show an extra use of it.  */

	  if (insn_code_number >= 0)
	    {
	      rtx new_i3_dest = SET_DEST (i3set);
	      rtx new_i2_dest = SET_DEST (i2set);

	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i3_dest) == SUBREG)
		new_i3_dest = XEXP (new_i3_dest, 0);

	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i2_dest) == SUBREG)
		new_i2_dest = XEXP (new_i2_dest, 0);

	      if (REG_P (new_i3_dest)
		  && REG_P (new_i2_dest)
		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
		  && REGNO (new_i2_dest) < reg_n_sets_max)
		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
	    }
	}
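      /* As a made-up illustration of such a machine-specific split: if
	 NEWPAT is

	     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 70000)))

	 an MD splitter may return the two insns

	     (set (reg:SI 99) (const_int 70000))
	     (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 99)))

	 in which case the first becomes NEWI2PAT and the second NEWPAT.  */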
      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  && (!HAVE_cc0 || REG_P (i2dest))
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || !modified_between_p (*split, i2, i3))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  i2scratch = true;

	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_BIND_INSNS)
	    i2src = copy_rtx (i2src);
	  else
	    i2src = NULL;

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }

	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      rtx i_rtx = gen_int_shift_amount (split_mode, i);
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), i_rtx));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
	  if (split_code == PLUS
	      && GET_CODE (XEXP (*split, 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
	    {
	      rtx nsplit = XEXP (*split, 0);
	      rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
						       XEXP (nsplit, 0),
						       i_rtx));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }
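	  /* E.g. a split point of (mult (reg:SI 100) (const_int 8)) that
	     arose inside an address is rewritten above as
	     (ashift (reg:SI 100) (const_int 3)), since 8 == 1 << 3.  */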
#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif
	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
		      || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y)" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }

	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }
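	  /* As a concrete instance of the "X = Y op Y" rule above,

		 (set (reg:SI 100) (mult:SI (plus:SI (reg:SI 101)
						     (reg:SI 102))
					    (plus:SI (reg:SI 101)
						     (reg:SI 102))))

	     splits into

		 NEWI2PAT: (set NEWDEST (plus:SI (reg:SI 101) (reg:SI 102)))
		 NEWPAT:   (set (reg:SI 100) (mult:SI NEWDEST NEWDEST))

	     (register numbers invented for the example).  */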
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}
    }
  /* Check for a case where we loaded from memory in a narrow mode and
     then sign extended it, but we need both registers.  In that case,
     we have a PARALLEL with both loads from the same memory location.
     We can split this into a load from memory followed by a register-register
     copy.  This saves at least one insn, more if register allocation can
     eliminate the copy.

     We cannot do this if the destination of the first assignment is a
     condition code register or cc0.  We eliminate this case by making sure
     the SET_DEST and SET_SRC have the same mode.

     We cannot do this if the destination of the second assignment is
     a register that we have already assumed is zero-extended.  Similarly
     for a SUBREG of such a register.  */

  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		 (REG_P (temp_expr)
		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
			       BITS_PER_WORD)
		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
			       HOST_BITS_PER_INT)
		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
		      != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				   BITS_PER_WORD)
		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				   HOST_BITS_PER_INT)
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))
    {
      rtx ni2dest;

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
	swap_i2i3 = 1;
    }
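  /* Schematically, the PARALLEL handled above,

	 (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI M)))
		    (set (reg:HI 101) (mem:HI M))])

     becomes an extending load in I2 followed by a register copy in I3:

	 I2: (set (reg:SI 100) (sign_extend:SI (mem:HI M)))
	 I3: (set (reg:HI 101) (lowpart of (reg:SI 100)))

     with the lowpart being whatever gen_lowpart produces.  */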
  /* Similarly, check for a case where we have a PARALLEL of two independent
     SETs but we started with three insns.  In this case, we can do the sets
     as two separate insns.  This case occurs when some SET allows two
     other insns to combine, but the destination of that SET is still live.

     Also do this if we started with two insns and (at least) one of the
     resulting sets is a noop; this noop will be deleted later.

     Also do this if we started with two insns neither of which was a simple
     move.  */

  else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && (i1
	       || set_noop_p (XVECEXP (newpat, 0, 0))
	       || set_noop_p (XVECEXP (newpat, 0, 1))
	       || (!i2_was_move && !i3_was_move))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);

      /* Normally, it doesn't matter which of the two is done first,
	 but the one that references cc0 can't be the second, and
	 one which uses any regs/memory set in between i2 and i3 can't
	 be first.  The PARALLEL might also have been pre-existing in i3,
	 so we need to make sure that we won't wrongly hoist a SET to i2
	 that would conflict with a death note present in there, or would
	 have its dest modified between i2 and i3.  */
      if (!modified_between_p (SET_SRC (set1), i2, i3)
	  && !(REG_P (SET_DEST (set1))
	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i2, REG_DEAD,
				 SUBREG_REG (SET_DEST (set1))))
	  && !modified_between_p (SET_DEST (set1), i2, i3)
	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	  /* If I3 is a jump, ensure that set0 is a jump so that
	     we do not create invalid RTL.  */
	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	{
	  newi2pat = set1;
	  newpat = set0;
	}
      else if (!modified_between_p (SET_SRC (set0), i2, i3)
	       && !(REG_P (SET_DEST (set0))
		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i2, REG_DEAD,
				      SUBREG_REG (SET_DEST (set0))))
	       && !modified_between_p (SET_DEST (set0), i2, i3)
	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
	       /* If I3 is a jump, ensure that set1 is a jump so that
		  we do not create invalid RTL.  */
	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	{
	  newi2pat = set0;
	  newpat = set1;
	}
      else
	{
	  undo_all ();
	  return 0;
	}

      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	{
	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    {
	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		  {
		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		    if (reg_overlap_mentioned_p (reg, newpat))
		      {
			undo_all ();
			return 0;
		      }
		  }
	    }

	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    split_i2i3 = 1;
	}
    }
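  /* In other words, a PARALLEL of two independent SETs such as

	 (parallel [(set (reg:SI 100) (reg:SI 101))
		    (set (reg:SI 102) (reg:SI 103))])

     is torn apart here: one SET becomes NEWI2PAT and the other NEWPAT,
     with the ordering constraints checked above deciding which is which.  */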
  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }
  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }
  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  if (HAVE_cc0)
    {
      rtx_insn *p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))
	{
	  undo_all ();
	  return 0;
	}
    }
  /* Only allow this combination if insn_cost reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }
  if (MAY_HAVE_DEBUG_BIND_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    gcc_assert (last);
		    if (DF_INSN_LUID (last)
			< DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }
  /* If we will be able to accept this, we have made a
     change to the destination of I3.  This requires us to
     do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }
  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn)))
	      /* Simply drop equal note since it may be no longer valid
		 for other_insn.  It may be possible to record that CC
		 register is changed and only discard those notes, but
		 in practice it's unnecessary complication and doesn't
		 give any meaningful improvement.

		 See PR78559.  */
	      || REG_NOTE_KIND (note) == REG_EQUAL
	      || REG_NOTE_KIND (note) == REG_EQUIV)
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }

  if (swap_i2i3)
    {
      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }

  if (swap_i2i3 || split_i2i3)
    {
      /* We might need a LOG_LINK from I3 to I2.  But then we used to
	 have one, so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We should change it to point at
	 I2 instead.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      rtx x = newi2pat;
      if (GET_CODE (x) == PARALLEL)
	x = XVECEXP (newi2pat, 0, 0);

      /* It can only be a SET of a REG or of a SUBREG of a REG.  */
      unsigned int regno = reg_or_subregno (SET_DEST (x));

      bool done = false;
      for (rtx_insn *insn = NEXT_INSN (i3);
	   !done
	   && insn
	   && NONDEBUG_INSN_P (insn)
	   && BLOCK_FOR_INSN (insn) == this_basic_block;
	   insn = NEXT_INSN (insn))
	{
	  struct insn_link *link;
	  FOR_EACH_LOG_LINK (link, insn)
	    if (link->insn == i3 && link->regno == regno)
	      {
		link->insn = i2;
		done = true;
		break;
	      }
	}
    }
  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;
    /* Compute which registers we expect to eliminate.  newi2pat may be
       setting either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing REG_DEAD
       note for i2 and i3, in which case it does matter if newi2pat sets
       i1dest or not.

       Local information is used when distributing REG_DEAD note for i1,
       in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151, if we have four insns combination:
	   i0: r0 <- i0src
	   i1: r1 <- i1src (using r0)
		   REG_DEAD (r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)
	   ix: using r0
       From i1's point of view, r0 is eliminated, no matter if it is set
       by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
       should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);

    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);

    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything is shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;

    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
	     link = XEXP (link, 1))
	  {
	    if (substed_i2)
	      {
		/* I2SRC must still be meaningful at this point.  Some
		   splitting operations can invalidate I2SRC, but those
		   operations do not apply to calls.  */
		gcc_assert (i2src);
		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						       i2dest, i2src);
	      }
	    if (substed_i1)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i1dest, i1src);
	    if (substed_i0)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i0dest, i0src);
	  }
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;

    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = NULL_RTX;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;

    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }

    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_BIND_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_BIND_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }

    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);

    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
       in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }
    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);

    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, thing
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }

    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }

    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }
    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }

  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }

  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  if (GET_CODE (PATTERN (i3)) == TRAP_IF
      && XEXP (PATTERN (i3), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (i3);
      gcc_assert (bb);
      remove_edge (split_block (bb, i3));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  if (undobuf.other_insn
      && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
      && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
      gcc_assert (bb);
      remove_edge (split_block (bb, undobuf.other_insn));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  combine_successes++;
  undo_commit ();

  rtx_insn *ret = newi2pat ? i2 : i3;
  if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
    ret = added_links_insn;
  if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
    ret = added_notes_insn;

  return ret;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}

/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
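/* A typical use of the marker interface above, by a caller that wants to
   make tentative changes and roll them back on failure:

       void *marker = get_undo_marker ();
       ... perform SUBSTs, which are recorded in undobuf ...
       if (the result is not acceptable)
	 undo_to_marker (marker);

   undo_all is the same as undoing to a marker taken before the first
   change, and undo_commit instead keeps all recorded changes.  */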
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;
  scalar_int_mode mode, inner_mode;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
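      /* For example, (mem (symbol_ref "x")) is rewritten above as

	     (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x")))

	 and the operand of the HIGH is returned as the split point, so
	 the expensive high part of the address is computed separately.  */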
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work and we have a nested plus, like:
	     ((REG1 * CONST1) + REG2) + CONST2 and (REG1 + REG2) + CONST2
	     is valid address, try to split (REG1 * CONST1).  */
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
	      && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
	      && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	      && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
							 0), 0)))))
	    {
	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
	      XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					       MEM_ADDR_SPACE (x)))
		{
		  XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
		  return &XEXP (XEXP (XEXP (x, 0), 0), 0);
		}
	      XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
	    }
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
		   && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
		   && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
							      0), 1)))))
	    {
	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
	      XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					       MEM_ADDR_SPACE (x)))
		{
		  XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
		  return &XEXP (XEXP (XEXP (x, 0), 0), 1);
		}
	      XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */
	  if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
	      && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;
    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
				     &inner_mode)
	  && HWI_COMPUTABLE_MODE_P (inner_mode)
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (inner_mode))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  unsigned HOST_WIDE_INT mask
	    = (HOST_WIDE_INT_1U << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, inner_mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, inner_mode,
					  simplify_gen_binary (AND, inner_mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
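      /* Concretely, a constant store into a bitfield such as

	     (set (zero_extract:SI (reg:SI 100) (const_int 4) (const_int 8))
		  (const_int 5))

	 is rewritten above as the equivalent mask-and-or

	     (set (reg:SI 100)
		  (ior:SI (and:SI (reg:SI 100) (const_int -3841))
			  (const_int 1280)))

	 since ~(0xf << 8) == -3841 and 5 << 8 == 1280.  */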
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;
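	  /* E.g. (set (reg:SI 100) (and:SI (reg:SI 101) (const_int 4096)))
	     whose only use is (ne (reg:SI 100) (const_int 0)) is replaced
	     above by a single-bit extraction of bit 12 of (reg:SI 101),
	     sparing whatever insns were needed to build the 0x1000
	     constant.  */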
	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
						   GET_MODE (XEXP (SET_SRC (x),
								   0))))) >= 1))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    pos_rtx)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;
	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (!is_int_mode (GET_MODE (inner), &inner_mode)
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (inner_mode);
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
				      &inner_mode)
	      && CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (inner_mode) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len
	  && known_subrange_p (pos, len,
			       0, GET_MODE_PRECISION (GET_MODE (inner)))
	  && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
	{
	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= (HOST_WIDE_INT_1U << len) - 1;
	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner), pos_rtx),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      int left_bits = GET_MODE_PRECISION (mode) - len - pos;
	      int right_bits = GET_MODE_PRECISION (mode) - len;
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      gen_int_shift_amount (mode, left_bits)),
		      gen_int_shift_amount (mode, right_bits)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
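	  /* For instance, an unsigned 8-bit field at bit 4 of (reg:SI 100)
	     uses the shift-and-AND form

		 (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4))
			 (const_int 255))

	     while a signed 16-bit field at bit 8 uses the two-shift form

		 (ashiftrt:SI (ashift:SI (reg:SI 100) (const_int 8))
			      (const_int 16))

	     since left_bits == 32 - 16 - 8 and right_bits == 32 - 16.  */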
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;

    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;
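      /* E.g. (and:SI (not:SI (reg:SI 100)) (not:SI (reg:SI 101))) becomes
	 (not:SI (ior:SI (reg:SI 100) (reg:SI 101))), whose IOR operand
	 then offers an ordinary split point.  */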
    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}

      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */
static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

/* Two expressions are equal if they are identical copies of a shared
   RTX or if they are both registers with the same register number
   and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;
  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && !targetm.modes_tieable_p (GET_MODE (to),
						   GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && (targetm.modes_tieable_p
				(GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
		      && (!HAVE_cc0
			  || ! (code == SET && i == 1
				&& XEXP (x, 0) == cc0_rtx)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG
					|| code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && (GET_CODE (x) == ZERO_EXTEND
			   || GET_CODE (x) == SIGN_EXTEND
			   || GET_CODE (x) == FLOAT
			   || GET_CODE (x) == UNSIGNED_FLOAT))
		{
		  x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
						new_rtx,
						GET_MODE (XEXP (x, 0)));
		  if (! x)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
        return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
         with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
        x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
        break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
         have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */
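/* For example, (plus (const_int 4) (reg 100)) is not canonical, since a
   constant belongs in the second operand; the swap below turns it into
   (plus (reg 100) (const_int 4)).  (The register number here is only
   illustrative.)  */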
static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
   of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
                      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  scalar_int_mode int_mode;
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  maybe_swap_commutative_operands (x);

  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
        op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
        machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
        if (cmp_mode == VOIDmode)
          {
            cmp_mode = GET_MODE (XEXP (x, 1));
            if (cmp_mode == VOIDmode)
              cmp_mode = op0_mode;
          }
        temp = simplify_relational_operation (code, mode, cmp_mode,
                                              XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
                                         XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }

  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
            && ! (GET_CODE (XEXP (x, 0)) == SUBREG
                  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
           || (!OBJECT_P (XEXP (x, 1))
               && ! (GET_CODE (XEXP (x, 1)) == SUBREG
                     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
          && (!OBJECT_P (XEXP (x, 0))
              && ! (GET_CODE (XEXP (x, 0)) == SUBREG
                    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
          /* If everything is a comparison, what we have is highly unlikely
             to be simpler, so don't use it.  */
          && ! (COMPARISON_P (x)
                && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
          /* Similarly, if we end up with one of the expressions the same
             as the original, it is certainly not simpler.  */
          && ! rtx_equal_p (x, true_rtx)
          && ! rtx_equal_p (x, false_rtx))
        {
          rtx cop1 = const0_rtx;
          enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

          if (cond_code == NE && COMPARISON_P (cond))
            return x;

          /* Simplify the alternative arms; this may collapse the true and
             false arms to store-flag values.  Be careful to use copy_rtx
             here since true_rtx or false_rtx might share RTL with x as a
             result of the if_then_else_cond call above.  */
          true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
          false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

          /* If true_rtx and false_rtx are not general_operands, an
             if_then_else is unlikely to be simpler.  */
          if (general_operand (true_rtx, VOIDmode)
              && general_operand (false_rtx, VOIDmode))
            {
              enum rtx_code reversed;

              /* Restarting if we generate a store-flag expression will cause
                 us to loop.  Just drop through in this case.  */

              /* If the result values are STORE_FLAG_VALUE and zero, we can
                 just make the comparison operation.  */
              if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
                x = simplify_gen_relational (cond_code, mode, VOIDmode,
                                             cond, cop1);
              else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
                       && ((reversed = reversed_comparison_code_parts
                                        (cond_code, cond, cop1, NULL))
                           != UNKNOWN))
                x = simplify_gen_relational (reversed, mode, VOIDmode,
                                             cond, cop1);

              /* Likewise, we can make the negate of a comparison operation
                 if the result values are - STORE_FLAG_VALUE and zero.  */
              else if (CONST_INT_P (true_rtx)
                       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
                       && false_rtx == const0_rtx)
                x = simplify_gen_unary (NEG, mode,
                                        simplify_gen_relational (cond_code,
                                                                 mode,
                                                                 VOIDmode,
                                                                 cond, cop1),
                                        mode);
              else if (CONST_INT_P (false_rtx)
                       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
                       && true_rtx == const0_rtx
                       && ((reversed = reversed_comparison_code_parts
                                        (cond_code, cond, cop1, NULL))
                           != UNKNOWN))
                x = simplify_gen_unary (NEG, mode,
                                        simplify_gen_relational (reversed,
                                                                 mode,
                                                                 VOIDmode,
                                                                 cond, cop1),
                                        mode);

              code = GET_CODE (x);
              op0_mode = VOIDmode;
            }
        }
    }

  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }

  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
          || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
        {
          rtx other = XEXP (XEXP (x, 0), 0);
          rtx inner_op0 = XEXP (XEXP (x, 0), 1);
          rtx inner_op1 = XEXP (x, 1);
          rtx inner;

          /* Make sure we pass the constant operand if any as the second
             one if this is a commutative operation.  */
          if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
            std::swap (inner_op0, inner_op1);
          inner = simplify_binary_operation (code == MINUS ? PLUS
                                             : code == DIV ? MULT
                                             : code,
                                             mode, inner_op0, inner_op1);

          /* For commutative operations, try the other pair if that one
             didn't simplify.  */
          if (inner == 0 && COMMUTATIVE_ARITH_P (x))
            {
              other = XEXP (XEXP (x, 0), 1);
              inner = simplify_binary_operation (code, mode,
                                                 XEXP (XEXP (x, 0), 0),
                                                 inner_op1);
            }

          if (inner)
            return simplify_gen_binary (code, mode, other, inner);
        }
    }

  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
         address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;

    case SUBREG:
      if (op0_mode == VOIDmode)
        op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
          && known_eq (subreg_lowpart_offset (mode, op0_mode),
                       SUBREG_BYTE (x))
             /* Don't call gen_lowpart if the inner mode
                is VOIDmode and we cannot simplify it, as SUBREG without
                inner mode is invalid.  */
          && (GET_MODE (SUBREG_REG (x)) != VOIDmode
              || gen_lowpart_common (mode, SUBREG_REG (x))))
        return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
        break;
      {
        rtx temp;
        temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
                                SUBREG_BYTE (x));
        if (temp)
          return temp;

        /* If op is known to have all lower bits zero, the result is zero.  */
        scalar_int_mode int_mode, int_op0_mode;
        if (!in_dest
            && is_a <scalar_int_mode> (mode, &int_mode)
            && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
            && (GET_MODE_PRECISION (int_mode)
                < GET_MODE_PRECISION (int_op0_mode))
            && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
                         SUBREG_BYTE (x))
            && HWI_COMPUTABLE_MODE_P (int_op0_mode)
            && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
                 & GET_MODE_MASK (int_mode)) == 0)
            && !side_effects_p (SUBREG_REG (x)))
          return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
         of the address.  */
      if (MEM_P (SUBREG_REG (x))
          && (MEM_VOLATILE_P (SUBREG_REG (x))
              || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
                                           MEM_ADDR_SPACE (SUBREG_REG (x)))))
        return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
         we might have been counting on using the fact that some bits were
         zero.  We now do this in the SET.  */

      break;

    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
         replaced by (lshiftrt X C).  This will convert
         (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
          && CONST_INT_P (XEXP (temp, 1))
          && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
                                     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
         (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
         MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
         (sign_extract X 1 Y).  But only do this if TEMP isn't a register
         or a SUBREG of one since we'd be making the expression more
         complex if it was just a register.  */

      if (!REG_P (temp)
          && ! (GET_CODE (temp) == SUBREG
                && REG_P (SUBREG_REG (temp)))
          && is_a <scalar_int_mode> (mode, &int_mode)
          && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
        {
          rtx temp1 = simplify_shift_const
            (NULL_RTX, ASHIFTRT, int_mode,
             simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
                                   GET_MODE_PRECISION (int_mode) - 1 - i),
             GET_MODE_PRECISION (int_mode) - 1 - i);

          /* If all we did was surround TEMP with the two shifts, we
             haven't improved anything, so don't use it.  Otherwise,
             we are better off with TEMP1.  */
          if (GET_CODE (temp1) != ASHIFTRT
              || GET_CODE (XEXP (temp1, 0)) != ASHIFT
              || XEXP (XEXP (temp1, 0), 0) != temp)
            return temp1;
        }
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      if (HWI_COMPUTABLE_MODE_P (mode))
        SUBST (XEXP (x, 0),
               force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
                              GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      {
        poly_int64 c;
        if (poly_int_rtx_p (XEXP (x, 0), &c))
          return gen_int_mode (c, mode);
      }

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
         whose value is a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
          && (temp = get_last_value (XEXP (x, 0)))
          && COMPARISON_P (temp))
        return gen_lowpart (mode, XEXP (x, 0));
      break;

    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
         returning the inner CONST since CONST can be shared with a
         REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
        SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
         can add in an offset.  find_split_point will split this address up
         again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
          && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
        return XEXP (x, 1);
      break;

    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
         when c is (const_int (pow2 + 1) / 2) is a sign extension of a
         bit-field and can be replaced by either a sign_extend or a
         sign_extract.  The `and' may be a zero_extend and the two
         <c>, -<c> constants may be reversed.  */
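      /* A worked instance of the identity (values illustrative only):
         for a 4-bit field, pow2 = 16 and c = 8.  With v = (x & 15),
         ((v ^ 8) - 8) reproduces the sign extension of v: the xor flips
         the field's sign bit and the subtraction restores it, e.g.
         v = 12 -> (12 ^ 8) - 8 = 4 - 8 = -4, which is 0b1100 read as a
         signed 4-bit value.  */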
      if (GET_CODE (XEXP (x, 0)) == XOR
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (x, 1))
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
          && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
              || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
          && HWI_COMPUTABLE_MODE_P (int_mode)
          && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
               && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
               && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
                   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
              || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
                  && known_eq ((GET_MODE_PRECISION
                                (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
                               (unsigned int) i + 1))))
        return simplify_shift_const
          (NULL_RTX, ASHIFTRT, int_mode,
           simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
                                 XEXP (XEXP (XEXP (x, 0), 0), 0),
                                 GET_MODE_PRECISION (int_mode) - (i + 1)),
           GET_MODE_PRECISION (int_mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
         can become (ashiftrt (ashift (xor x 1) C) C) where C is
         the bitsize of the mode - 1.  This allows simplification of
         "a = (b & 8) == 0;"  */
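      /* Checking the claim on the two possible values (x known to be
         0 or 1, C = precision - 1):
           x = 0:  ((0 ^ 1) << C) >> C = -1 = 0 - 1
           x = 1:  ((1 ^ 1) << C) >> C =  0 = 1 - 1
         so the shift pair computes exactly (plus x -1).  */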
      if (XEXP (x, 1) == constm1_rtx
          && !REG_P (XEXP (x, 0))
          && ! (GET_CODE (XEXP (x, 0)) == SUBREG
                && REG_P (SUBREG_REG (XEXP (x, 0))))
          && is_a <scalar_int_mode> (mode, &int_mode)
          && nonzero_bits (XEXP (x, 0), int_mode) == 1)
        return simplify_shift_const
          (NULL_RTX, ASHIFTRT, int_mode,
           simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
                                 gen_rtx_XOR (int_mode, XEXP (x, 0),
                                              const1_rtx),
                                 GET_MODE_PRECISION (int_mode) - 1),
           GET_MODE_PRECISION (int_mode) - 1);

      /* If we are adding two things that have no bits in common, convert
         the addition into an IOR.  This will often be further simplified,
         for example in cases like ((a & 1) + (a & 2)), which can
         become a & 3.  */
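      /* With no bit position set in both operands there can be no carry,
         so PLUS and IOR agree: (a & 1) and (a & 2) have disjoint nonzero
         bits, and (a & 1) + (a & 2) = (a & 1) | (a & 2) = a & 3.  */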
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (XEXP (x, 0), mode)
              & nonzero_bits (XEXP (x, 1), mode)) == 0)
        {
          /* Try to simplify the expression further.  */
          rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
          temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

          /* If we could, great.  If not, do not go ahead with the IOR
             replacement, since PLUS appears in many special purpose
             address arithmetic instructions.  */
          if (GET_CODE (temp) != CLOBBER
              && (GET_CODE (temp) != IOR
                  || ((XEXP (temp, 0) != XEXP (x, 0)
                       || XEXP (temp, 1) != XEXP (x, 1))
                      && (XEXP (temp, 0) != XEXP (x, 1)
                          || XEXP (temp, 1) != XEXP (x, 0)))))
            return temp;
        }

      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
          && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
          && !side_effects_p (XEXP (x, 0)))
        return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;

    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
         (and <foo> (const_int pow2-1))  */
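      /* E.g. (minus x (and x (const_int -8))) -> (and x (const_int 7)):
         the AND clears the low three bits, so the subtraction leaves
         exactly those bits; -(-8) - 1 = 7 is the mask built below.  */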
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && GET_CODE (XEXP (x, 1)) == AND
          && CONST_INT_P (XEXP (XEXP (x, 1), 1))
          && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
          && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
        return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
                                       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;

    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
         the inverse distributive law to see if things simplify.  This
         occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
          && GET_CODE (XEXP (x, 0)) == DIV)
        {
          rtx tem = simplify_binary_operation (MULT, mode,
                                               XEXP (XEXP (x, 0), 0),
                                               XEXP (x, 1));
          if (tem)
            return simplify_gen_binary (DIV, mode, tem,
                                        XEXP (XEXP (x, 0), 1));
        }
      break;

    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
         its first operand is a shift.  */
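      /* E.g. (udiv (ashift x 3) (const_int 4)) becomes
         (lshiftrt (ashift x 3) 2), which simplify_shift_const can then
         merge into a single shift of x.  */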
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (x, 1))
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
          && (GET_CODE (XEXP (x, 0)) == ASHIFT
              || GET_CODE (XEXP (x, 0)) == LSHIFTRT
              || GET_CODE (XEXP (x, 0)) == ASHIFTRT
              || GET_CODE (XEXP (x, 0)) == ROTATE
              || GET_CODE (XEXP (x, 0)) == ROTATERT))
        return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
                                     XEXP (x, 0), i);
      break;

    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
         with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
          || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
              && ! CC0_P (XEXP (x, 0))))
        {
          rtx op0 = XEXP (x, 0);
          rtx op1 = XEXP (x, 1);
          enum rtx_code new_code;

          if (GET_CODE (op0) == COMPARE)
            op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

          /* Simplify our comparison, if possible.  */
          new_code = simplify_comparison (code, &op0, &op1);

          /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
             if only the low-order bit is possibly nonzero in X (such as when
             X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
             (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
             known to be either 0 or -1, NE becomes a NEG and EQ becomes
             (plus X 1).

             Remove any ZERO_EXTRACT we made when thinking this was a
             comparison.  It may now be simpler to use, e.g., an AND.  If a
             ZERO_EXTRACT is indeed appropriate, it will be placed back by
             the call to make_compound_operation in the SET case.

             Don't apply these optimizations if the caller would
             prefer a comparison rather than a value.
             E.g., for the condition in an IF_THEN_ELSE most targets need
             an explicit comparison.  */
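          /* For instance, with STORE_FLAG_VALUE == 1 and X a one-bit
             ZERO_EXTRACT (so nonzero_bits (X) == 1):
               (ne X (const_int 0))  ->  X
               (eq X (const_int 0))  ->  (xor X (const_int 1))
             as handled by the cases below.  */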
          if (in_cond)
            ;

          else if (STORE_FLAG_VALUE == 1
                   && new_code == NE
                   && is_int_mode (mode, &int_mode)
                   && op1 == const0_rtx
                   && int_mode == GET_MODE (op0)
                   && nonzero_bits (op0, int_mode) == 1)
            return gen_lowpart (int_mode,
                                expand_compound_operation (op0));

          else if (STORE_FLAG_VALUE == 1
                   && new_code == NE
                   && is_int_mode (mode, &int_mode)
                   && op1 == const0_rtx
                   && int_mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, int_mode)
                       == GET_MODE_PRECISION (int_mode)))
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NEG, int_mode,
                                         gen_lowpart (int_mode, op0),
                                         int_mode);
            }

          else if (STORE_FLAG_VALUE == 1
                   && new_code == EQ
                   && is_int_mode (mode, &int_mode)
                   && op1 == const0_rtx
                   && int_mode == GET_MODE (op0)
                   && nonzero_bits (op0, int_mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_binary (XOR, int_mode,
                                          gen_lowpart (int_mode, op0),
                                          const1_rtx);
            }

          else if (STORE_FLAG_VALUE == 1
                   && new_code == EQ
                   && is_int_mode (mode, &int_mode)
                   && op1 == const0_rtx
                   && int_mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, int_mode)
                       == GET_MODE_PRECISION (int_mode)))
            {
              op0 = expand_compound_operation (op0);
              return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
            }

          /* If STORE_FLAG_VALUE is -1, we have cases similar to
             those above.  */
          if (in_cond)
            ;

          else if (STORE_FLAG_VALUE == -1
                   && new_code == NE
                   && is_int_mode (mode, &int_mode)
                   && op1 == const0_rtx
                   && int_mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, int_mode)
                       == GET_MODE_PRECISION (int_mode)))
            return gen_lowpart (int_mode, expand_compound_operation (op0));

          else if (STORE_FLAG_VALUE == -1
                   && new_code == NE
                   && is_int_mode (mode, &int_mode)
                   && op1 == const0_rtx
                   && int_mode == GET_MODE (op0)
                   && nonzero_bits (op0, int_mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NEG, int_mode,
                                         gen_lowpart (int_mode, op0),
                                         int_mode);
            }

          else if (STORE_FLAG_VALUE == -1
                   && new_code == EQ
                   && is_int_mode (mode, &int_mode)
                   && op1 == const0_rtx
                   && int_mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, int_mode)
                       == GET_MODE_PRECISION (int_mode)))
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NOT, int_mode,
                                         gen_lowpart (int_mode, op0),
                                         int_mode);
            }

          /* If X is 0/1, (eq X 0) is X-1.  */
          else if (STORE_FLAG_VALUE == -1
                   && new_code == EQ
                   && is_int_mode (mode, &int_mode)
                   && op1 == const0_rtx
                   && int_mode == GET_MODE (op0)
                   && nonzero_bits (op0, int_mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return plus_constant (int_mode, gen_lowpart (int_mode, op0),
                                    -1);
            }

          /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
             one bit that might be nonzero, we can convert (ne x 0) to
             (ashift x c) where C puts the bit in the sign bit.  Remove any
             AND with STORE_FLAG_VALUE when we are done, since we are only
             going to test the sign bit.  */
          if (new_code == NE
              && is_int_mode (mode, &int_mode)
              && HWI_COMPUTABLE_MODE_P (int_mode)
              && val_signbit_p (int_mode, STORE_FLAG_VALUE)
              && op1 == const0_rtx
              && int_mode == GET_MODE (op0)
              && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
            {
              x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
                                        expand_compound_operation (op0),
                                        GET_MODE_PRECISION (int_mode) - 1
                                        - i);
              if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
                return XEXP (x, 0);
              else
                return x;
            }

          /* If the code changed, return a whole new comparison.
             We also need to avoid using SUBST in cases where
             simplify_comparison has widened a comparison with a CONST_INT,
             since in that case the wider CONST_INT may fail the sanity
             checks in do_SUBST.  */
          if (new_code != code
              || (CONST_INT_P (op1)
                  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
                  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
            return gen_rtx_fmt_ee (new_code, mode, op0, op1);

          /* Otherwise, keep this operation, but maybe change its operands.
             This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
          SUBST (XEXP (x, 0), op0);
          SUBST (XEXP (x, 1), op1);
        }
      break;

    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
        return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
        return simplify_shift_const (x, code, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
        SUBST (XEXP (x, 1),
               force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
                              (HOST_WIDE_INT_1U
                               << exact_log2 (GET_MODE_UNIT_BITSIZE
                                              (GET_MODE (x))))
                              - 1,
                              0));
      break;

    default:
      break;
    }

  return x;
}

/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode, inner_mode;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
                                    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
          != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
        {
          swapped = 1, true_code = EQ, false_code = NE;
          std::swap (true_rtx, false_rtx);
        }

      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
        {
          /* If we are comparing against zero and the expression being
             tested has only a single bit that might be nonzero, that is
             its value when it is not equal to zero.  Similarly if it is
             known to be -1 or 0.  */
          if (true_code == EQ
              && true_val == const0_rtx
              && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
            {
              false_code = EQ;
              false_val = gen_int_mode (nzb, from_mode);
            }
          else if (true_code == EQ
                   && true_val == const0_rtx
                   && (num_sign_bit_copies (from, from_mode)
                       == GET_MODE_PRECISION (from_mode)))
            {
              false_code = EQ;
              false_val = constm1_rtx;
            }
        }

      /* Now simplify an arm if we know the value of the register in the
         branch and it is used in the arm.  Be careful due to the potential
         of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
        true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
                                      from, true_val),
                          pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
        false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
                                       from, false_val),
                           pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }

  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
          || (CONSTANT_P (true_rtx)
              && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
          || true_rtx == const0_rtx
          || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
          || (GET_CODE (true_rtx) == SUBREG
              && OBJECT_P (SUBREG_REG (true_rtx))
              && !OBJECT_P (false_rtx))
          || reg_mentioned_p (true_rtx, false_rtx)
          || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }

  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
           && !HONOR_NANS (mode)
           && rtx_equal_p (XEXP (cond, 0), true_rtx)
           && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;

  /* Look for cases where we have (abs x) or (neg (abs X)).  */
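  /* E.g. (if_then_else (gt x 0) x (neg x)) becomes (abs x), and with
     LT or LE in the condition the result is (neg (abs x)) instead.  */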
  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
        return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
        return
          simplify_gen_unary (NEG, mode,
                              simplify_gen_unary (ABS, mode, true_rtx, mode),
                              mode);
      default:
        break;
      }

  /* Look for MIN or MAX.  */
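  /* E.g. (if_then_else (ge a b) a b) is (smax a b), and
     (if_then_else (ltu a b) a b) is (umin a b); the condition's two
     operands must be exactly the two arms, as checked below.  */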
  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
        return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
        return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
        return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
        return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
        break;
      }
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      scalar_int_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
           || GET_CODE (t) == IOR || GET_CODE (t) == XOR
           || GET_CODE (t) == ASHIFT
           || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
          && rtx_equal_p (XEXP (t, 0), f))
        c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
         would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
                || GET_CODE (t) == XOR)
               && rtx_equal_p (XEXP (t, 1), f))
        c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
               && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == MINUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR
                   || GET_CODE (XEXP (t, 0)) == ASHIFT
                   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
                   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
               && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
               && (num_sign_bit_copies (f, GET_MODE (f))
                   > (unsigned int)
                     (GET_MODE_PRECISION (int_mode)
                      - GET_MODE_PRECISION (inner_mode))))
        {
          c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = SIGN_EXTEND;
          m = inner_mode;
        }
      else if (GET_CODE (t) == SIGN_EXTEND
               && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR)
               && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
               && (num_sign_bit_copies (f, GET_MODE (f))
                   > (unsigned int)
                     (GET_MODE_PRECISION (int_mode)
                      - GET_MODE_PRECISION (inner_mode))))
        {
          c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = SIGN_EXTEND;
          m = inner_mode;
        }
      else if (GET_CODE (t) == ZERO_EXTEND
               && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == MINUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR
                   || GET_CODE (XEXP (t, 0)) == ASHIFT
                   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
                   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
               && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
               && HWI_COMPUTABLE_MODE_P (int_mode)
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
               && ((nonzero_bits (f, GET_MODE (f))
                    & ~GET_MODE_MASK (inner_mode))
                   == 0))
        {
          c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = ZERO_EXTEND;
          m = inner_mode;
        }
      else if (GET_CODE (t) == ZERO_EXTEND
               && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR)
               && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
               && HWI_COMPUTABLE_MODE_P (int_mode)
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
               && ((nonzero_bits (f, GET_MODE (f))
                    & ~GET_MODE_MASK (inner_mode))
                   == 0))
        {
          c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = ZERO_EXTEND;
          m = inner_mode;
        }

      if (z)
        {
          machine_mode cm = m;
          if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
              && GET_MODE (c1) != VOIDmode)
            cm = GET_MODE (c1);
          temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
                                                 cond_op0, cond_op1),
                        pc_rtx, pc_rtx, 0, 0, 0);
          temp = simplify_gen_binary (MULT, cm, temp,
                                      simplify_gen_binary (MULT, cm, c1,
                                                           const_true_rtx));
          temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
          temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

          if (extend_op != UNKNOWN)
            temp = simplify_gen_unary (extend_op, int_mode, temp, m);

          return temp;
        }
    }

  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
           && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
          || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
               == GET_MODE_PRECISION (int_mode))
              && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
                            gen_lowpart (int_mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
      && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
         == nonzero_bits (XEXP (cond, 0), inner_mode)
      && (i = exact_log2 (UINTVAL (true_rtx)
                          & GET_MODE_MASK (int_mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      if (inner_mode == int_mode)
        return val;
      else if (GET_MODE_PRECISION (inner_mode)
               < GET_MODE_PRECISION (int_mode))
        return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
    }

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }

  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
        {
          op0 = XEXP (src, 0), op1 = XEXP (src, 1);
          if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
            {
              inner_compare = op0;
              op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
            }
        }
      else
        op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
                                           op0, op1);
      if (!tmp)
        new_code = old_code;
      else if (!CONSTANT_P (tmp))
        {
          new_code = GET_CODE (tmp);
          op0 = XEXP (tmp, 0);
          op1 = XEXP (tmp, 1);
        }
      else
        {
          rtx pat = PATTERN (other_insn);
          undobuf.other_insn = other_insn;
          SUBST (*cc_use, tmp);

          /* Attempt to simplify CC user.  */
          if (GET_CODE (pat) == SET)
            {
              rtx new_rtx = simplify_rtx (SET_SRC (pat));
              if (new_rtx != NULL_RTX)
                SUBST (SET_SRC (pat), new_rtx);
            }

          /* Convert X into a no-op move.  */
          SUBST (SET_DEST (x), pc_rtx);
          SUBST (SET_SRC (x), pc_rtx);
          return x;
        }

      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
         need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
        compare_mode = GET_MODE (op0);
      else if (inner_compare
               && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
               && new_code == old_code
               && op0 == XEXP (inner_compare, 0)
               && op1 == XEXP (inner_compare, 1))
        compare_mode = GET_MODE (inner_compare);
      else
        compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
         compare, and the mode in the place SET_DEST is used.  If SET_DEST is
         a hard register, just build new versions with the proper mode.  If it
         is a pseudo, we lose unless it is only time we set the pseudo, in
         which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
        {
          if (can_change_dest_mode (dest, 0, compare_mode))
            {
              unsigned int regno = REGNO (dest);
              rtx new_dest;

              if (regno < FIRST_PSEUDO_REGISTER)
                new_dest = gen_rtx_REG (compare_mode, regno);
              else
                {
                  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
                  new_dest = regno_reg_rtx[regno];
                }

              SUBST (SET_DEST (x), new_dest);
              SUBST (XEXP (*cc_use, 0), new_dest);
              other_changed = 1;

              dest = new_dest;
            }
        }
#endif  /* SELECT_CC_MODE */

      /* If the code changed, we have to build a new comparison in
         undobuf.other_insn.  */
      if (new_code != old_code)
        {
          int other_changed_previously = other_changed;
          unsigned HOST_WIDE_INT mask;
          rtx old_cc_use = *cc_use;

          SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
                                          dest, const0_rtx));
          other_changed = 1;

          /* If the only change we made was to change an EQ into an NE or
             vice versa, OP0 has only one bit that might be nonzero, and OP1
             is zero, check if changing the user of the condition code will
             produce a valid insn.  If it won't, we can keep the original code
             in that insn by surrounding our operation with an XOR.  */

          if (((old_code == NE && new_code == EQ)
               || (old_code == EQ && new_code == NE))
              && ! other_changed_previously && op1 == const0_rtx
              && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
              && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
            {
              rtx pat = PATTERN (other_insn), note = 0;

              if ((recog_for_combine (&pat, other_insn, &note) < 0
                   && ! check_asm_operands (pat)))
                {
                  *cc_use = old_cc_use;
                  other_changed = 0;

                  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
                                             gen_int_mode (mask,
                                                           GET_MODE (op0)));
                }
            }
        }

      if (other_changed)
        undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
        {
          SUBST (SET_SRC (x), op0);
          src = SET_SRC (x);
        }
      /* Otherwise, if we didn't previously have the same COMPARE we
         want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
               || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
        {
          SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
          src = SET_SRC (x);
        }
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
         compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }

  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machine where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (known_equal_after_align_up
          (GET_MODE_SIZE (GET_MODE (src)),
           GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
           UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
            && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
                                       GET_MODE (SUBREG_REG (src)),
                                       GET_MODE (src)))
      && (REG_P (dest)
          || (GET_CODE (dest) == SUBREG
              && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
             gen_lowpart (GET_MODE (SUBREG_REG (src)),
                          dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }

  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && partial_subreg_p (src)
      && subreg_lowpart_p (src))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
                                     nonzero_bits (inner, inner_mode)))
        {
          SUBST (SET_SRC (x), inner);
          src = SET_SRC (x);
        }
    }

  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.
     Don't do this unless we have a scalar integer mode, otherwise the
     transformation is incorrect.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && SCALAR_INT_MODE_P (GET_MODE (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src))))
         != UNKNOWN)
    {
      SUBST (SET_SRC (x),
             gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }

  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */
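  /* E.g. when M is known to be 0 or -1,
       (if_then_else (ne M 0) A B)
     can be computed as (ior (and M A) (and (not M) B)); TERM1 below
     additionally picks up a shared IOR operand when one arm is an IOR
     of the other arm with some value.  */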
  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
          || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
          == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
                      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
                       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
          && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
        term1 = false_rtx, true_rtx = XEXP (true_rtx, 1),
        false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
               && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
        term1 = false_rtx, true_rtx = XEXP (true_rtx, 0),
        false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
               && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
        term1 = true_rtx, false_rtx = XEXP (false_rtx, 1),
        true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
               && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
        term1 = true_rtx, false_rtx = XEXP (false_rtx, 0),
        true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
                                   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
                                   simplify_gen_unary (NOT, int_mode,
                                                       XEXP (XEXP (src, 0),
                                                             0),
                                                       int_mode),
                                   false_rtx);

      SUBST (SET_SRC (x),
             simplify_gen_binary (IOR, int_mode,
                                  simplify_gen_binary (IOR, int_mode,
                                                       term1, term2),
                                  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
         any (sign) bits when converting INTVAL (op1) to
         "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
          && CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0))
        {
          x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
          if (GET_CODE (x) != AND)
            return x;

          op0 = XEXP (x, 0);
          op1 = XEXP (x, 1);
        }

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
         apply the distributive law and then the inverse distributive
         law to see if things simplify.  */
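      /* E.g. (and (ior A B) C) distributes to (ior (and A C) (and B C));
         if either inner AND then simplifies (say B and C have no nonzero
         bits in common), the inverse distributive law gives a net win.  */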
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
        {
          rtx result = distribute_and_simplify_rtx (x, 1);
          if (result)
            return result;
        }
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
         the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }

      if (GET_CODE (op1) == AND)
        {
          rtx result = distribute_and_simplify_rtx (x, 1);
          if (result)
            return result;
        }
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */

static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* FALLTHRU */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
         it depends on implicitly extending the value.
         Since we don't know the right way to extend it,
         we can't tell whether the implicit way is right.

         Even for a mode that is no wider than a const_int,
         we can't win, because we need to sign extend one of its bits through
         the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
        return x;

      /* Reject modes that aren't scalar integers because turning vector
         or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
        return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
         (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
         because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
         reloaded.  If not for that, MEM's would very rarely be safe.

         Reject modes bigger than a word, because we might not be able
         to reference a two-register group starting with an arbitrary register
         (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
        return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
         is if it is an ASM_OPERANDS), we can't do anything since we don't
         know how much masking to do.  */
      if (len == 0)
        return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
        return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
          || !CONST_INT_P (XEXP (x, 2)))
        return x;

      /* Reject modes that aren't scalar integers because turning vector
         or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
        return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
        return x;

      if (BITS_BIG_ENDIAN)
        pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }

  /* We've rejected non-scalar operations by now.  */
  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (mode)
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
           & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
          == 0))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
          > set_src_cost (temp2, mode, optimize_this_for_speed_p))
        return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
               > set_src_cost (temp, mode, optimize_this_for_speed_p))
        return temp;
      else
        return x;
    }

  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
         know that the last value didn't have any inappropriate bits
         set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
          && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
              & ~GET_MODE_MASK (inner_mode)) == 0)
        return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
          && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
          && subreg_lowpart_p (XEXP (x, 0))
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
              & ~GET_MODE_MASK (inner_mode)) == 0)
        return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
         is a comparison and STORE_FLAG_VALUE permits.  This is like
         the first case, but it works even when MODE is larger
         than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
          && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
          && COMPARISON_P (XEXP (XEXP (x, 0), 0))
          && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
        return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
          && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
          && subreg_lowpart_p (XEXP (x, 0))
          && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
          && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
        return SUBREG_REG (XEXP (x, 0));
    }

  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */
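  /* For example, in a 32-bit mode, (sign_extract:SI x 8 0) expands to
     (ashiftrt:SI (ashift:SI x 24) 24): the inner shift moves the 8-bit
     field up against the sign bit and the arithmetic right shift then
     extends it (24 = 32 - 0 - 8 for the inner count and 32 - 8 for the
     outer one).  */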
7429 modewidth
= GET_MODE_PRECISION (mode
);
7430 if (modewidth
>= pos
+ len
)
7432 tem
= gen_lowpart (mode
, XEXP (x
, 0));
7433 if (!tem
|| GET_CODE (tem
) == CLOBBER
)
7435 tem
= simplify_shift_const (NULL_RTX
, ASHIFT
, mode
,
7436 tem
, modewidth
- pos
- len
);
7437 tem
= simplify_shift_const (NULL_RTX
, unsignedp
? LSHIFTRT
: ASHIFTRT
,
7438 mode
, tem
, modewidth
- len
);
7440 else if (unsignedp
&& len
< HOST_BITS_PER_WIDE_INT
)
7441 tem
= simplify_and_const_int (NULL_RTX
, mode
,
7442 simplify_shift_const (NULL_RTX
, LSHIFTRT
,
7445 (HOST_WIDE_INT_1U
<< len
) - 1);
7447 /* Any other cases we can't handle. */
7450 /* If we couldn't do this for some reason, return the original
7452 if (GET_CODE (tem
) == CLOBBER
)
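
/* Illustrative example: with MODE == SImode, POS == 8 and LEN == 8,
   modewidth is 32, so the expansion is roughly

       (ashiftrt:SI (ashift:SI X (const_int 16)) (const_int 24))

   for a signed field: the left shift discards the 16 bits above the
   field and the arithmetic right shift brings the field down to bit 0
   while replicating its sign bit.  An unsigned field instead uses
   LSHIFTRT, which simplify_and_const_int may fold into an AND.  */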
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */

static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;                      /* Always counts from low bit.  */
  int len, inner_len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
          && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
        {
          rtx x0 = XEXP (SET_DEST (x), 0);
          if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
            break;
          inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
          pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
                              MAX_MODE_INT);
        }
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
               && CONST_INT_P (XEXP (SET_DEST (x), 1)))
        {
          inner = XEXP (SET_DEST (x), 0);
          if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
            break;
          len = INTVAL (XEXP (SET_DEST (x), 1));
          pos = XEXP (SET_DEST (x), 2);

          /* A constant position should stay within the width of INNER.  */
          if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
            break;

          if (BITS_BIG_ENDIAN)
            {
              if (CONST_INT_P (pos))
                pos = GEN_INT (inner_len - len - INTVAL (pos));
              else if (GET_CODE (pos) == MINUS
                       && CONST_INT_P (XEXP (pos, 1))
                       && INTVAL (XEXP (pos, 1)) == inner_len - len)
                /* If position is ADJUST - X, new position is X.  */
                pos = XEXP (pos, 0);
              else
                pos = simplify_gen_binary (MINUS, GET_MODE (pos),
                                           gen_int_mode (inner_len - len,
                                                         GET_MODE (pos)),
                                           pos);
            }
        }

      /* If the destination is a subreg that overwrites the whole of the inner
         register, we can move the subreg to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
               /* We need SUBREGs to compute nonzero_bits properly.  */
               && nonzero_sign_valid
               && !read_modify_subreg_p (SET_DEST (x)))
        {
          x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
                           gen_lowpart
                           (GET_MODE (SUBREG_REG (SET_DEST (x))),
                            SET_SRC (x)));
          continue;
        }
      else
        break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
        inner = SUBREG_REG (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
        {
          /* Don't do anything for vector or complex integral types.  */
          if (! FLOAT_MODE_P (GET_MODE (inner)))
            break;

          /* Try to find an integral mode to pun with.  */
          if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
              .exists (&compute_mode))
            break;

          inner = gen_lowpart (compute_mode, inner);
        }

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
        break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
        break;

      /* Now compute the equivalent expression.  Make a copy of INNER
         for the SET_DEST in case it is a MEM into which we will substitute;
         we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
                           compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
                                     simplify_gen_unary (NOT, compute_mode,
                                       simplify_gen_binary (ASHIFT,
                                                            compute_mode,
                                                            mask, pos),
                                       compute_mode),
                                     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
                                    simplify_gen_binary (
                                      AND, compute_mode,
                                      gen_lowpart (compute_mode, SET_SRC (x)),
                                      mask),
                                    pos);

      x = gen_rtx_SET (copy_rtx (inner),
                       simplify_gen_binary (IOR, compute_mode,
                                            cleared, masked));
    }

  return x;
}
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */

static rtx
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
                 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
                 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  scalar_int_mode wanted_inner_mode;
  scalar_int_mode wanted_inner_reg_mode = word_mode;
  scalar_int_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;

  if (GET_CODE (inner) == SUBREG
      && subreg_lowpart_p (inner)
      && (paradoxical_subreg_p (inner)
          /* If trying or potentionally trying to extract
             bits outside of is_mode, don't look through
             non-paradoxical SUBREGs.  See PR82192.  */
          || (pos_rtx == NULL_RTX
              && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
         consider just the QI as the memory to extract from.
         The subreg adds or removes high bits; its mode is
         irrelevant to the meaning of this extraction,
         since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
        is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
           && CONST_INT_P (XEXP (inner, 1))
           && pos_rtx == 0 && pos == 0
           && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
         (ashift X (const_int C)), where LEN > C.  Extract the
         least significant (LEN - C) bits of X, giving an rtx
         whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
                                 0, 0, len - INTVAL (XEXP (inner, 1)),
                                 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
        return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE
           /* If trying or potentionally trying to extract
              bits outside of is_mode, don't look through
              TRUNCATE.  See PR82192.  */
           && pos_rtx == NULL_RTX
           && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode. For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
           && !MEM_P (inner)
           && (pos == 0 || REG_P (inner))
           && (inner_mode == tmode
               || !REG_P (inner)
               || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
               || reg_truncated_to_mode (tmode, inner))
           && (! in_dest
               || (REG_P (inner)
                   && have_insn_for (STRICT_LOW_PART, tmode))))
          || (MEM_P (inner) && pos_rtx == 0
              && (pos
                  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
                     : BITS_PER_UNIT)) == 0
              /* We can't do this if we are widening INNER_MODE (it
                 may not be aligned, for one thing).  */
              && !paradoxical_subreg_p (tmode, inner_mode)
              && known_le (pos + len, GET_MODE_PRECISION (is_mode))
              && (inner_mode == tmode
                  || (! mode_dependent_address_p (XEXP (inner, 0),
                                                  MEM_ADDR_SPACE (inner))
                      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
         field.  If the original and current mode are the same, we need not
         adjust the offset.  Otherwise, we do if bytes big endian.

         If INNER is not a MEM, get a piece consisting of just the field
         of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
        {
          poly_int64 offset;

          /* POS counts from lsb, but make OFFSET count in memory order.  */
          if (BYTES_BIG_ENDIAN)
            offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
                                               - len - pos);
          else
            offset = pos / BITS_PER_UNIT;

          new_rtx = adjust_address_nv (inner, tmode, offset);
        }
      else if (REG_P (inner))
        {
          if (tmode != inner_mode)
            {
              /* We can't call gen_lowpart in a DEST since we
                 always want a SUBREG (see below) and it would sometimes
                 return a new hard register.  */
              if (pos || in_dest)
                {
                  poly_uint64 offset
                    = subreg_offset_from_lsb (tmode, inner_mode, pos);

                  /* Avoid creating invalid subregs, for example when
                     simplifying (x>>32)&255.  */
                  if (!validate_subreg (tmode, inner_mode, inner, offset))
                    return NULL_RTX;

                  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
                }
              else
                new_rtx = gen_lowpart (tmode, inner);
            }
          else
            new_rtx = inner;
        }
      else
        new_rtx = force_to_mode (inner, tmode,
                                 len >= HOST_BITS_PER_WIDE_INT
                                 ? HOST_WIDE_INT_M1U
                                 : (HOST_WIDE_INT_1U << len) - 1, 0);

      /* If this extraction is going into the destination of a SET,
         make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
        return (MEM_P (new_rtx) ? new_rtx
                : (GET_CODE (new_rtx) != SUBREG
                   ? gen_rtx_CLOBBER (tmode, const0_rtx)
                   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));

      if (mode == tmode)
        return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
        return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
                                         mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
         bit is not set, convert the extraction to the cheaper of
         sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
          && (HWI_COMPUTABLE_MODE_P (tmode)
              && ((nonzero_bits (new_rtx, tmode)
                   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
                  == 0)))
        {
          rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
          rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

          /* Prefer ZERO_EXTENSION, since it gives more information to
             backends.  */
          if (set_src_cost (temp, mode, optimize_this_for_speed_p)
              <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
            return temp;
          return temp1;
        }

      /* Otherwise, sign- or zero-extend unless we already are in the
         proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
                             mode, new_rtx));
    }

  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* Unless INNER is not MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
          || (pos_rtx != 0 && len != 1)))
    return 0;

  enum extraction_pattern pattern = (in_dest ? EP_insv
                                     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  class extraction_insn insn;
  unsigned int inner_size;
  if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
      && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
    {
      wanted_inner_reg_mode = insn.struct_mode.require ();
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && partial_subreg_p (extraction_mode, mode))
    extraction_mode = mode;

  /* Punt if len is too large for extraction_mode.  */
  if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
    return NULL_RTX;

  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
         natural alignment of the memory.  */
      wanted_inner_mode = smallest_int_mode_for_size (len);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
             > GET_MODE_BITSIZE (wanted_inner_mode))
        wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
    }

  orig_pos = pos;

  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
         BITS_BIG_ENDIAN style.  If position is constant, compute new
         position.  Otherwise, build subtraction.
         Note that POS is relative to the mode of the original argument.
         If it's a MEM we need to recompute POS relative to that.
         However, if we're extracting from (or inserting into) a register,
         we want to recompute POS relative to wanted_inner_mode.  */
      HOST_WIDE_INT width;
      if (!MEM_P (inner))
        width = GET_MODE_BITSIZE (wanted_inner_mode);
      else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
        return 0;

      if (pos_rtx == 0)
        pos = width - len - pos;
      else
        pos_rtx
          = gen_rtx_MINUS (GET_MODE (pos_rtx),
                           gen_int_mode (width - len, GET_MODE (pos_rtx)),
                           pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
         Note that it can only be less than 0 if !MEM_P (inner).  */
    }

  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && partial_subreg_p (wanted_inner_mode, is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      poly_int64 offset = 0;

      /* The computations below will be correct if the machine is big
         endian in both bits and bytes or little endian in bits and bytes.
         If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
         adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
          && paradoxical_subreg_p (is_mode, inner_mode))
        offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
                * GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
          && is_mode != wanted_inner_mode)
        offset = (GET_MODE_SIZE (is_mode)
                  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }

  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitely truncating
         the register unless TARGET_TRULY_NOOP_TRUNCATION.  */
      if (in_dest
          && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
                                             wanted_inner_mode))
        return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
          && (pos_rtx != 0
              || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
        return NULL_RTX;

      if (orig_pos < 0)
        return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
                             pos_rtx
                             || len + orig_pos >= HOST_BITS_PER_WIDE_INT
                             ? HOST_WIDE_INT_M1U
                             : (((HOST_WIDE_INT_1U << len) - 1)
                                << orig_pos),
                             0);
    }

  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.

     We dealt with constant rtxes earlier, so pos_rtx cannot
     have VOIDmode at this point.  */
  if (pos_rtx != 0
      && (GET_MODE_SIZE (pos_mode)
          > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
                                     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
         bit is not set, convert extraction to cheaper one - either
         SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
         cases.  */
      if (flag_expensive_optimizations
          && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
              && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
                   & ~(((unsigned HOST_WIDE_INT)
                        GET_MODE_MASK (GET_MODE (pos_rtx)))
                       >> 1))
                  == 0)))
        {
          rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
                                          GET_MODE (pos_rtx));

          /* Prefer ZERO_EXTENSION, since it gives more information to
             backends.  */
          if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
              < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
            temp = temp1;
        }
      pos_rtx = temp;
    }

  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
                             extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}
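
/* Illustrative example: asking for 8 unsigned bits at constant bit 8
   of a pseudo register typically yields

       (zero_extract:SI (reg:SI R) (const_int 8) (const_int 8))

   whereas the same request with POS == 0 is rejected early (unless
   IN_DEST or IN_COMPARE is set), because
   (and:SI (reg:SI R) (const_int 255)) is already the simpler
   canonical form for a low-order unsigned field.  */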
/* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
   can be commuted with any other operations in X.  Return X without
   that shift if so.  */

static rtx
extract_left_shift (scalar_int_mode mode, rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
         either the value being shifted if the shift count is equal to
         COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= count)
        return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
        return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
         make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
          && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
        {
          HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
          return simplify_gen_binary (code, mode, tem,
                                      gen_int_mode (val, mode));
        }
      break;

    default:
      break;
    }

  return 0;
}
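
/* Illustrative example: with COUNT == 3,

       (plus:SI (ashift:SI X (const_int 3)) (const_int 8))

   becomes (plus:SI X (const_int 1)): the inner ASHIFT is at least as
   wide as COUNT, and the constant 8 has its low 3 bits clear, so it
   can safely be shifted right along with the rest of the
   expression.  */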
/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */

static rtx
make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
                             enum rtx_code in_code,
                             enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  scalar_int_mode inner_mode;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }

  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
         an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) >= 0)
        {
          HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
          HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

          new_rtx = make_compound_operation (XEXP (x, 0), next_code);
          if (GET_CODE (new_rtx) == NEG)
            {
              new_rtx = XEXP (new_rtx, 0);
              multval = -multval;
            }
          multval = trunc_int_for_mode (multval, mode);
          new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
        }
      break;

    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
                                     XEXP (lhs, 1));
          new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
          return new_rtx;
        }
      else if (GET_CODE (lhs) == MULT
               && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
                                     simplify_gen_unary (NEG, mode,
                                                         XEXP (lhs, 1),
                                                         mode));
          new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
          return new_rtx;
        }
      else
        {
          SUBST (XEXP (x, 0), lhs);
          SUBST (XEXP (x, 1), rhs);
        }
      maybe_swap_commutative_operands (x);
      return x;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
                                     XEXP (rhs, 1));
          return simplify_gen_binary (PLUS, mode, tem, lhs);
        }
      else if (GET_CODE (rhs) == MULT
               && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
                                     simplify_gen_unary (NEG, mode,
                                                         XEXP (rhs, 1),
                                                         mode));
          return simplify_gen_binary (PLUS, mode, tem, lhs);
        }
      else
        {
          SUBST (XEXP (x, 0), lhs);
          SUBST (XEXP (x, 1), rhs);
          return x;
        }

    case AND:
      /* If the second operand is not a constant, we can't do anything
         with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
        break;

      /* If the constant is a power of two minus one and the first operand
         is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
                                     i, 1, 0, in_code == COMPARE);
        }

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
               && subreg_lowpart_p (XEXP (x, 0))
               && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
                                          &inner_mode)
               && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
          new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
          new_rtx = make_extraction (inner_mode, new_rtx, 0,
                                     XEXP (inner_x0, 1),
                                     i, 1, 0, in_code == COMPARE);

          /* If we narrowed the mode when dropping the subreg, then we lose.  */
          if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
            new_rtx = NULL;

          /* If that didn't give anything, see if the AND simplifies on
             its own.  */
          if (!new_rtx && i >= 0)
            {
              new_rtx = make_compound_operation (XEXP (x, 0), next_code);
              new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
                                         0, in_code == COMPARE);
            }
        }
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
                || GET_CODE (XEXP (x, 0)) == IOR)
               && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
               && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          /* Apply the distributive law, and then try to make extractions.  */
          new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
                                    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
                                                 XEXP (x, 1)),
                                    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
                                                 XEXP (x, 1)));
          new_rtx = make_compound_operation (new_rtx, in_code);
        }

      /* If we are have (and (rotate X C) M) and C is larger than the number
         of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
               && CONST_INT_P (XEXP (XEXP (x, 0), 1))
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
               && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx,
                                     (GET_MODE_PRECISION (mode)
                                      - INTVAL (XEXP (XEXP (x, 0), 1))),
                                     NULL_RTX, i, 1, 0, in_code == COMPARE);
        }

      /* On machines without logical shifts, if the operand of the AND is
         a logical shift and our mask turns off all the propagated sign
         bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
               && !have_insn_for (LSHIFTRT, mode)
               && have_insn_for (ASHIFTRT, mode)
               && CONST_INT_P (XEXP (XEXP (x, 0), 1))
               && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
               && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
               && mode_width <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

          mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
          if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
            SUBST (XEXP (x, 0),
                   gen_rtx_ASHIFTRT (mode,
                                     make_compound_operation (XEXP (XEXP (x, 0),
                                                                    0),
                                                              next_code),
                                     XEXP (XEXP (x, 0), 1)));
        }

      /* If the constant is one less than a power of two, this might be
         representable by an extraction even if no shift is present.
         If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
         we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        new_rtx = make_extraction (mode,
                                   make_compound_operation (XEXP (x, 0),
                                                            next_code),
                                   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
         convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
               && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
        new_rtx = make_extraction (mode,
                                   make_compound_operation (XEXP (x, 0),
                                                            next_code),
                                   i, NULL_RTX, 1, 1, 0, 1);

      /* If the one operand is a paradoxical subreg of a register or memory and
         the constant (limited to the smaller mode) has only zero bits where
         the sub expression has known zero bits, this can be expressed as
         a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
        {
          rtx sub;

          sub = XEXP (XEXP (x, 0), 0);
          machine_mode sub_mode = GET_MODE (sub);
          int sub_width;
          if ((REG_P (sub) || MEM_P (sub))
              && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
              && sub_width < mode_width)
            {
              unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
              unsigned HOST_WIDE_INT mask;

              /* original AND constant with all the known zero bits set */
              mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
              if ((mask & mode_mask) == mode_mask)
                {
                  new_rtx = make_compound_operation (sub, next_code);
                  new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
                                             1, 0, in_code == COMPARE);
                }
            }
        }

      break;

    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
         arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
          && ! have_insn_for (LSHIFTRT, mode)
          && mode_width <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
        {
          new_rtx = gen_rtx_ASHIFTRT (mode,
                                      make_compound_operation (XEXP (x, 0),
                                                               next_code),
                                      XEXP (x, 1));
          break;
        }

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
         this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
          && GET_CODE (lhs) == ASHIFT
          && CONST_INT_P (XEXP (lhs, 1))
          && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
          && INTVAL (XEXP (lhs, 1)) >= 0
          && INTVAL (rhs) < mode_width)
        {
          new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
          new_rtx = make_extraction (mode, new_rtx,
                                     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
                                     NULL_RTX, mode_width - INTVAL (rhs),
                                     code == LSHIFTRT, 0, in_code == COMPARE);
          break;
        }

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
         If so, try to merge the shifts into a SIGN_EXTEND.  We could
         also do this for some cases of SIGN_EXTRACT, but it doesn't
         seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
          && ! (GET_CODE (lhs) == SUBREG
                && (OBJECT_P (SUBREG_REG (lhs))))
          && CONST_INT_P (rhs)
          && INTVAL (rhs) >= 0
          && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
          && INTVAL (rhs) < mode_width
          && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
        new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
                                                                  next_code),
                                   0, NULL_RTX, mode_width - INTVAL (rhs),
                                   code == LSHIFTRT, 0, in_code == COMPARE);

      break;

    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
         narrowing the object and it has a different RTL code from
         what it originally did, do this SUBREG as a force_to_mode.  */
      {
        rtx inner = SUBREG_REG (x), simplified;
        enum rtx_code subreg_code = in_code;

        /* If the SUBREG is masking of a logical right shift,
           make an extraction.  */
        if (GET_CODE (inner) == LSHIFTRT
            && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
            && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
            && CONST_INT_P (XEXP (inner, 1))
            && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
            && subreg_lowpart_p (x))
          {
            new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
            int width = GET_MODE_PRECISION (inner_mode)
                        - INTVAL (XEXP (inner, 1));
            if (width > mode_width)
              width = mode_width;
            new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
                                       width, 1, 0, in_code == COMPARE);
            break;
          }

        /* If in_code is COMPARE, it isn't always safe to pass it through
           to the recursive make_compound_operation call.  */
        if (subreg_code == COMPARE
            && (!subreg_lowpart_p (x)
                || GET_CODE (inner) == SUBREG
                /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
                   is (const_int 0), rather than
                   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
                   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
                   for non-equality comparisons against 0 is not equivalent
                   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
                || (GET_CODE (inner) == AND
                    && CONST_INT_P (XEXP (inner, 1))
                    && partial_subreg_p (x)
                    && exact_log2 (UINTVAL (XEXP (inner, 1)))
                       >= GET_MODE_BITSIZE (mode) - 1)))
          subreg_code = SET;

        tem = make_compound_operation (inner, subreg_code);

        simplified
          = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
        if (simplified)
          tem = simplified;

        if (GET_CODE (tem) != GET_CODE (inner)
            && partial_subreg_p (x)
            && subreg_lowpart_p (x))
          {
            rtx newer
              = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

            /* If we have something other than a SUBREG, we might have
               done an expansion, so rerun ourselves.  */
            if (GET_CODE (newer) != SUBREG)
              newer = make_compound_operation (newer, in_code);

            /* force_to_mode can expand compounds.  If it just re-expanded
               the compound, use gen_lowpart to convert to the desired
               mode.  */
            if (rtx_equal_p (newer, x)
                /* Likewise if it re-expanded the compound only partially.
                   This happens for SUBREG of ZERO_EXTRACT if they extract
                   the same number of bits.  */
                || (GET_CODE (newer) == SUBREG
                    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
                        || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
                    && GET_CODE (inner) == AND
                    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
              return gen_lowpart (GET_MODE (x), tem);

            return newer;
          }

        if (simplified)
          return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}
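
/* Illustrative example: in a SET context,

       (and:SI (lshiftrt:SI X (const_int 8)) (const_int 255))

   is recognized as a field reference, since 255 is one less than a
   power of two and the operand is a logical right shift, and is
   typically rebuilt as

       (zero_extract:SI X (const_int 8) (const_int 8))

   which expand_compound_operation can later turn back into shifts if
   the target has no extraction instruction.  */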
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */

static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  enum rtx_code next_code;
  rtx new_rtx, tem;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
               : ((code == COMPARE || COMPARISON_P (x))
                  && XEXP (x, 1) == const0_rtx) ? COMPARE
               : in_code == COMPARE || in_code == EQ ? SET : in_code);

  scalar_int_mode mode;
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
    {
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
                                                 &next_code);
      if (new_rtx)
        return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                            new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
        return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
        new_rtx = make_compound_operation (XEXP (x, i), next_code);
        SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        {
          new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
          SUBST (XVECEXP (x, i, j), new_rtx);
        }

  maybe_swap_commutative_operands (x);
  return x;
}
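
/* Illustrative example: on a target with sign-extension instructions,

       (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))

   passed through make_compound_operation in a SET context is
   typically turned into (sign_extend:SI (subreg:QI X 0)), which is
   exactly what expand_compound_operation undoes at the other end of
   the pass.  */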
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;

  return pos;
}
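
/* Illustrative example: M == 0x0ff0 selects an 8-bit field starting
   at bit 4: ctz_hwi gives POS == 4, (M >> 4) + 1 == 0x100 is a power
   of two, so *PLEN is set to 8 and 4 is returned.  A non-contiguous
   mask such as 0x0f0f fails the exact_log2 test and yields -1.  */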
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */

static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
        return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
                                   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
                                        GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
        return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
                                     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
        {
          if (rtx_equal_p (get_last_value (reg), x)
              || rtx_equal_p (reg, get_last_value (x)))
            return reg;
          else
            break;
        }

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (fmt[i] == 'e')
          {
            rtx op = canon_reg_for_combine (XEXP (x, i), reg);
            if (op != XEXP (x, i))
              {
                if (!copied)
                  {
                    copied = true;
                    x = copy_rtx (x);
                  }
                XEXP (x, i) = op;
              }
          }
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              {
                rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
                if (op != XVECEXP (x, i, j))
                  {
                    if (!copied)
                      {
                        copied = true;
                        x = copy_rtx (x);
                      }
                    XVECEXP (x, i, j) = op;
                  }
              }
          }

      break;
    }

  return x;
}
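
/* Illustrative example: if get_last_value shows that (reg 131) still
   holds the same value as (reg 65), then canonicalizing
   (plus:SI (reg 131) (const_int 4)) against (reg 65) rebuilds it as
   (plus:SI (reg 65) (const_int 4)), so that a later rtx_equal_p test
   can match the two expressions.  */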
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && partial_subreg_p (mode, GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
        x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
                              x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
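
/* Illustrative example: narrowing a DImode register to SImode
   normally yields (subreg:SI (reg:DI R) 0); only on targets where
   truncation is not a no-op, and where the register is not already
   known to be truncated, is an explicit (truncate:SI ...) generated
   first.  */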
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
               int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT nonzero;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
              && have_insn_for (code, mode))
             ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && partial_subreg_p (mode, GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode != VOIDmode)
    mask &= GET_MODE_MASK (op_mode);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
        return gen_int_mode (INTVAL (x) & mask, mode);
      else
        {
          x = GEN_INT (INTVAL (x) & mask);
          return gen_lowpart_common (mode, x);
        }
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && (partial_subreg_p (x)
          || (mask
              & GET_MODE_MASK (GET_MODE (x))
              & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  scalar_int_mode int_mode, xmode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    /* OP_MODE is either MODE or XMODE, so it must be a scalar
       integer too.  */
    return force_int_to_mode (x, int_mode, xmode,
                              as_a <scalar_int_mode> (op_mode),
                              mask, just_select);

  return gen_lowpart_or_truncate (mode, x);
}
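
/* Illustrative example: force_to_mode on (const_int 0x1234) with
   MASK == 0xff simply returns (const_int 0x34), and a lowpart SUBREG
   that narrows its operand is looked through, so the mask is applied
   directly to the inner register.  */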
/* Subroutine of force_to_mode that handles cases in which both X and
   the result are scalar integers.  MODE is the mode of the result,
   XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
   is preferred for simplified versions of X.  The other arguments
   are as for force_to_mode.  */

static rtx
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
                   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
                   int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  unsigned HOST_WIDE_INT fuller_mask;
  rtx op0, op1, temp;
  poly_int64 const_op0;

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
                   - 1);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
         generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
        return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
         whose constant is the AND of that constant with MASK.  If it
         remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
        {
          x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
                                      mask & INTVAL (XEXP (x, 1)));
          xmode = op_mode;

          /* If X is still an AND, see if it is an AND with a mask that
             is just some low-order bits.  If so, and it is MASK, we don't
             need it.  */

          if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
              && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
            x = XEXP (x, 0);

          /* If it remains an AND, try making another AND with the bits
             in the mode mask that aren't in MASK turned on.  If the
             constant in the AND is wide enough, this might make a
             cheaper constant.  */

          if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
              && GET_MODE_MASK (xmode) != mask
              && HWI_COMPUTABLE_MODE_P (xmode))
            {
              unsigned HOST_WIDE_INT cval
                = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
              rtx y;

              y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
                                       gen_int_mode (cval, xmode));
              if (set_src_cost (y, xmode, optimize_this_for_speed_p)
                  < set_src_cost (x, xmode, optimize_this_for_speed_p))
                x = y;
            }

          break;
        }

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
         low-order bits (as in an alignment operation) and FOO is already
         aligned to that boundary, mask C1 to that boundary as well.
         This may eliminate that PLUS and, later, the AND.  */

      {
        unsigned int width = GET_MODE_PRECISION (mode);
        unsigned HOST_WIDE_INT smask = mask;

        /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
           number, sign extend it.  */

        if (width < HOST_BITS_PER_WIDE_INT
            && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
          smask |= HOST_WIDE_INT_M1U << width;

        if (CONST_INT_P (XEXP (x, 1))
            && pow2p_hwi (- smask)
            && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
            && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
          return force_to_mode (plus_constant (xmode, XEXP (x, 0),
                                               (INTVAL (XEXP (x, 1)) & smask)),
                                mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
         create RTL matching a machine insn.  */
      if (code == MULT
          && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
              || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
          && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
              || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
          && REG_P (XEXP (XEXP (x, 0), 0))
          && REG_P (XEXP (XEXP (x, 1), 0)))
        return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
         most significant bit in MASK since carries from those bits will
         affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
         in the mask, then we may replace with (neg Y).  */
      if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
          && known_alignment (poly_uint64 (const_op0)) > mask)
        {
          x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
          return force_to_mode (x, mode, mask, next_select);
        }

      /* Similarly, if C contains every bit in the fuller_mask, then we may
         replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
          && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
        {
          x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
          return force_to_mode (x, mode, mask, next_select);
        }

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
         LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
         operation which may be a bitfield extraction.  Ensure that the
         constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
          && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && ((INTVAL (XEXP (XEXP (x, 0), 1))
               + floor_log2 (INTVAL (XEXP (x, 1))))
              < GET_MODE_PRECISION (xmode))
          && (UINTVAL (XEXP (x, 1))
              & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
        {
          temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
                               << INTVAL (XEXP (XEXP (x, 0), 1)),
                               xmode);
          temp = simplify_gen_binary (GET_CODE (x), xmode,
                                      XEXP (XEXP (x, 0), 0), temp);
          x = simplify_gen_binary (LSHIFTRT, xmode, temp,
                                   XEXP (XEXP (x, 0), 1));
          return force_to_mode (x, mode, mask, next_select);
        }

    binop:
      /* For most binary operations, just propagate into the operation and
         change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
         operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
          && GET_CODE (op1) == TRUNCATE)
        {
          op0 = XEXP (op0, 0);
          op1 = XEXP (op1, 0);
        }

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        {
          x = simplify_gen_binary (code, op_mode, op0, op1);
          xmode = op_mode;
        }
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
         However, we cannot do anything with shifts where we cannot
         guarantee that the counts are smaller than the size of the mode
         because such a count will have a different meaning in a
         wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
             && INTVAL (XEXP (x, 1)) >= 0
             && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
          && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
                && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
                    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
        break;

      /* If the shift count is a constant and we can do arithmetic in
         the mode of the shift, refine which bits we need.  Otherwise, use the
         conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
          && HWI_COMPUTABLE_MODE_P (op_mode))
        mask >>= INTVAL (XEXP (x, 1));
      else
        mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
                                     force_to_mode (XEXP (x, 0), mode,
                                                    mask, next_select));

      if (op_mode != xmode || op0 != XEXP (x, 0))
        {
          x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
          xmode = op_mode;
        }
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
         this shift constant is valid for the host, and we can do arithmetic
         in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && HWI_COMPUTABLE_MODE_P (op_mode))
        {
          rtx inner = XEXP (x, 0);
          unsigned HOST_WIDE_INT inner_mask;

          /* Select the mask of the bits we need for the shift operand.  */
          inner_mask = mask << INTVAL (XEXP (x, 1));

          /* We can only change the mode of the shift if we can do arithmetic
             in the mode of the shift and INNER_MASK is no wider than the
             width of X's mode.  */
          if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
            op_mode = xmode;

          inner = force_to_mode (inner, op_mode, inner_mask, next_select);

          if (xmode != op_mode || inner != XEXP (x, 0))
            {
              x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
              xmode = op_mode;
            }
        }

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
         shift and AND produces only copies of the sign bit (C2 is one less
         than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
          && CONST_INT_P (XEXP (x, 1))
          /* The shift puts one of the sign bit copies in the least significant
             bit.  */
          && ((INTVAL (XEXP (x, 1))
               + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
              >= GET_MODE_PRECISION (xmode))
          && pow2p_hwi (mask + 1)
          /* Number of bits left after the shift must be more than the mask
             needs.  */
          && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
              <= GET_MODE_PRECISION (xmode))
          /* Must be more sign bit copies than the mask needs.  */
          && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
              >= exact_log2 (mask + 1)))
        {
          int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
          x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
                                   gen_int_shift_amount (xmode, nbits));
        }
      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
         all, even if it has a variable count.  */
      if (val_signbit_p (xmode, mask))
        return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
         that are not copies of the sign bit.  We then have two cases:  If
         MASK only includes those bits, this can be a logical shift, which may
         allow simplifications.  If MASK is a single-bit field not within
         those bits, we are requesting a copy of the sign bit and hence can
         shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT nonzero;
          int i;

          /* If the considered data is wider than HOST_WIDE_INT, we can't
             represent a mask for all its bits in a single scalar.
             But we only care about the lower bits, so calculate these.  */

          if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
            {
              nonzero = HOST_WIDE_INT_M1U;

              /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
                 is the number of bits a full-width mask would have set.
                 We need only shift if these are fewer than nonzero can
                 hold.  If not, we must keep all bits set in nonzero.  */

              if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
                  < HOST_BITS_PER_WIDE_INT)
                nonzero >>= INTVAL (XEXP (x, 1))
                            + HOST_BITS_PER_WIDE_INT
                            - GET_MODE_PRECISION (xmode);
            }
          else
            {
              nonzero = GET_MODE_MASK (xmode);
              nonzero >>= INTVAL (XEXP (x, 1));
            }

          if ((mask & ~nonzero) == 0)
            {
              x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
                                        XEXP (x, 0), INTVAL (XEXP (x, 1)));
              if (GET_CODE (x) != ASHIFTRT)
                return force_to_mode (x, mode, mask, next_select);
            }

          else if ((i = exact_log2 (mask)) >= 0)
            {
              x = simplify_shift_const
                  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
                   GET_MODE_PRECISION (xmode) - 1 - i);

              if (GET_CODE (x) != ASHIFTRT)
                return force_to_mode (x, mode, mask, next_select);
            }
        }

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
         even if the shift count isn't a constant.  */
      if (mask == 1)
        x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));

    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
         we don't care about, remove it.  Be sure the call above returned
         something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
          && CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && (INTVAL (XEXP (x, 1))
              <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
          && GET_CODE (XEXP (x, 0)) == ASHIFT
          && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
        return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
                              next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
         in the mode of X, compute where the bits we care about are.
         Otherwise, we can't do anything.  Don't change the mode of
         the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0)
        {
          temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
                                            xmode, gen_int_mode (mask, xmode),
                                            XEXP (x, 1));
          if (temp && CONST_INT_P (temp))
            x = simplify_gen_binary (code, xmode,
                                     force_to_mode (XEXP (x, 0), xmode,
                                                    INTVAL (temp), next_select),
                                     XEXP (x, 1));
        }
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
         won't change the low-order bit.  */
      if (mask == 1)
        return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
         MASK since carries from those bits will affect the bits we are
         interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
         same as the XOR case above.  Ensure that the constant we form is not
         wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
          && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
              < GET_MODE_PRECISION (xmode))
          && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
        {
          temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
          temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
          x = simplify_gen_binary (LSHIFTRT, xmode,
                                   temp, XEXP (XEXP (x, 0), 1));

          return force_to_mode (x, mode, mask, next_select);
        }

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
         use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
                                     force_to_mode (XEXP (x, 0), mode, mask,
                                                    next_select));
      if (op_mode != xmode || op0 != XEXP (x, 0))
        {
          x = simplify_gen_unary (code, op_mode, op0, op_mode);
          xmode = op_mode;
        }
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
         in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
         which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
          && XEXP (x, 1) == const0_rtx
          && GET_MODE (XEXP (x, 0)) == mode
          && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
          && (nonzero_bits (XEXP (x, 0), mode)
              == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
        return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
         written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (xmode,
                                     force_to_mode (XEXP (x, 1), mode,
                                                    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
                                     force_to_mode (XEXP (x, 2), mode,
                                                    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
        x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
                                  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
                                  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}
/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */

static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;
  scalar_int_mode int_mode;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }

  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
           && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
                                    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);
      cond0 = if_then_else_cond (op0, &true0, &false0);
      cond1 = if_then_else_cond (op1, &true1, &false1);

      if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
          && (REG_P (op0) || REG_P (op1)))
        {
          /* Try to enable a simplification by undoing work done by
             if_then_else_cond if it converted a REG into something more
             complex.  */
          if (REG_P (op0))
            {
              cond0 = 0;
              true0 = false0 = op0;
            }
          else
            {
              cond1 = 0;
              true1 = false1 = op1;
            }
        }

      if ((cond0 != 0 || cond1 != 0)
          && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
        {
          /* If if_then_else_cond returned zero, then true/false are the
             same rtl.  We must copy one of them to prevent invalid rtl
             sharing.  */
          if (cond0 == 0)
            true0 = copy_rtx (true0);
          else if (cond1 == 0)
            true1 = copy_rtx (true1);

          if (COMPARISON_P (x))
            {
              *ptrue = simplify_gen_relational (code, mode, VOIDmode,
                                                true0, true1);
              *pfalse = simplify_gen_relational (code, mode, VOIDmode,
                                                 false0, false1);
            }
          else
            {
              *ptrue = simplify_gen_binary (code, mode, true0, true1);
              *pfalse = simplify_gen_binary (code, mode, false0, false1);
            }

          return cond0 ? cond0 : cond1;
        }

      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
         operands is zero when the other is nonzero, and vice-versa,
         and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
          && (code == PLUS || code == IOR || code == XOR || code == MINUS
              || code == UMAX)
          && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
        {
          rtx op0 = XEXP (XEXP (x, 0), 1);
          rtx op1 = XEXP (XEXP (x, 1), 1);

          cond0 = XEXP (XEXP (x, 0), 0);
          cond1 = XEXP (XEXP (x, 1), 0);

          if (COMPARISON_P (cond0)
              && COMPARISON_P (cond1)
              && SCALAR_INT_MODE_P (mode)
              && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
                   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
                   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
                  || ((swap_condition (GET_CODE (cond0))
                       == reversed_comparison_code (cond1, NULL))
                      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
                      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
              && ! side_effects_p (x))
            {
              *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
              *pfalse = simplify_gen_binary (MULT, mode,
                                             (code == MINUS
                                              ? simplify_gen_unary (NEG, mode,
                                                                    op1, mode)
                                              : op1),
                                             const_true_rtx);
              return cond0;
            }
        }
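      /* A worked instance (added for illustration): for
         (plus (mult (eq A 0) 3) (mult (ne A 0) 5)) the two inner conditions
         are reversed forms of each other, so with STORE_FLAG_VALUE == 1 this
         returns (eq A 0) with *PTRUE = 3 and *PFALSE = 5.  */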
      /* Similarly for MULT, AND and UMIN, except that for these the result
         is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
          && (code == MULT || code == AND || code == UMIN)
          && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
        {
          cond0 = XEXP (XEXP (x, 0), 0);
          cond1 = XEXP (XEXP (x, 1), 0);

          if (COMPARISON_P (cond0)
              && COMPARISON_P (cond1)
              && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
                   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
                   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
                  || ((swap_condition (GET_CODE (cond0))
                       == reversed_comparison_code (cond1, NULL))
                      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
                      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
              && ! side_effects_p (x))
            {
              *ptrue = *pfalse = const0_rtx;
              return cond0;
            }
        }
    }

  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
         canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
        return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
        {
          *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
          return XEXP (cond0, 0);
        }
    }

  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
  else if (code == SUBREG
           && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
                                          &false0)) != 0)
    {
      true0 = simplify_gen_subreg (mode, true0,
                                   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
                                    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
        {
          *ptrue = true0;
          *pfalse = false0;
          return cond0;
        }
    }

  /* If X is a constant, this isn't special and will cause confusions
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
           || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
           || (is_a <scalar_int_mode> (mode, &int_mode)
               && (num_sign_bit_copies (x, int_mode)
                   == GET_MODE_PRECISION (int_mode))))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
           && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
        return XEXP (x, 0);
      case LT:  case LE:
        return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
                                   XEXP (x, 0),
                                   GET_MODE (XEXP (x, 0)));
      default:
        break;
      }

  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
        {
          std::swap (val, reg);
          cond = swap_condition (cond);
        }

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
        {
          if (COMPARISON_P (x))
            {
              if (comparison_dominates_p (cond, code))
                return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;

              code = reversed_comparison_code (x, NULL);
              if (code != UNKNOWN
                  && comparison_dominates_p (cond, code))
                return CONST0_RTX (GET_MODE (x));
              else
                return x;
            }
          else if (code == SMAX || code == SMIN
                   || code == UMIN || code == UMAX)
            {
              int unsignedp = (code == UMIN || code == UMAX);

              /* Do not reverse the condition when it is NE or EQ.
                 This is because we cannot conclude anything about
                 the value of 'SMAX (x, y)' when x is not equal to y,
                 but we can when x equals y.  */
              if ((code == SMAX || code == UMAX)
                  && ! (cond == EQ || cond == NE))
                cond = reverse_condition (cond);

              switch (cond)
                {
                case GE:   case GT:
                  return unsignedp ? x : XEXP (x, 1);
                case LE:   case LT:
                  return unsignedp ? x : XEXP (x, 0);
                case GEU:  case GTU:
                  return unsignedp ? XEXP (x, 1) : x;
                case LEU:  case LTU:
                  return unsignedp ? XEXP (x, 0) : x;
                default:
                  break;
                }
            }
        }
    }
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
        {
          /* We must simplify subreg here, before we lose track of the
             original inner_mode.  */
          new_rtx = simplify_subreg (GET_MODE (x), r,
                                     inner_mode, SUBREG_BYTE (x));
          if (new_rtx)
            return new_rtx;
          else
            SUBST (SUBREG_REG (x), r);
        }

      return x;
    }

  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
        {
          /* We must simplify the zero_extend here, before we lose
             track of the original inner_mode.  */
          new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                              r, inner_mode);
          if (new_rtx)
            return new_rtx;
          else
            SUBST (XEXP (x, 0), r);
        }

      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
                                                cond, reg, val));
    }

  return x;
}
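/* A worked instance of the MIN/MAX handling in known_cond (added for
   illustration): for X = (smax R V) under the known condition R > V, the
   condition is reversed to LE and the LE arm returns XEXP (x, 0), i.e. R,
   which is indeed the value of the SMAX when R > V.  */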
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
        return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
        return 0;
      x = adjust_address_nv (x, GET_MODE (y),
                             byte_lowpart_offset (GET_MODE (y),
                                                  GET_MODE (x)));
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
                      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
                      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
     above cases are true.  */
  return 0;
}
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;

  /* All the rules in this function are specific to scalar integers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return x;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && partial_subreg_p (XEXP (src, 0))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
                                XEXP (SUBREG_REG (XEXP (src, 0)), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const1_rtx);
      return x;
    }
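  /* A worked instance (added for illustration): clearing bit POS of D
     starts life as (set D (and (not (ashift 1 POS)) D)), has already been
     canonicalized to (set D (and (rotate -2 POS) D)), and the first rule
     above rewrites it as (set (zero_extract D 1 POS) 0).  */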
  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
        ze_mask = -1;
      else
        ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
        return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
        {
          src = gen_rtx_AND (mode, XEXP (src, 0),
                             gen_int_mode (and_mask & ze_mask, mode));
          return gen_rtx_SET (dest, src);
        }
    }
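  /* A worked instance (added for illustration): for
     (set (zero_extract D 4 POS) (and S 0xff)) the AND mask covers all four
     field bits, so the AND is dropped entirely; with 0x16 instead, only the
     overlapping bits survive and the source becomes (and S 0x6).  */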
  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  scalar_int_mode src_mode = mode;
  if (GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
           && paradoxical_subreg_p (XEXP (rhs, 0))
           && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
           && CONST_INT_P (XEXP (rhs, 1))
           && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
                                                dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
           && CONST_INT_P (XEXP (lhs, 1))
           && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
           && paradoxical_subreg_p (XEXP (lhs, 0))
           && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
           && CONST_INT_P (XEXP (lhs, 1))
           && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
                                                dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
  if (pos < 0
      || pos + len > GET_MODE_PRECISION (mode)
      || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, mode)) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
                           ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
                                                     src_mode, other, pos),
                               dest);
  src = force_to_mode (src, new_mode,
                       len >= HOST_BITS_PER_WIDE_INT
                       ? HOST_WIDE_INT_M1U
                       : (HOST_WIDE_INT_1U << len) - 1,
                       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
         == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
        return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
        return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
           && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
           && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
                              apply_distributive_law (tem), other);
}
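/* A worked instance (added for illustration): for (and (ior A C) (ior B C))
   the inner code is IOR and the common operand is C, so the result is
   (ior (and A B) C); for (xor (ior A C) (ior B C)) the one exception above
   applies and the result is (and (xor A B) (not C)).  */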
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and (A (not B))) (and (B (not B)))),
   which then simplifies to (and (A (not B))).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */

static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
      inner_code = XOR;
      n = 0;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
                                                     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
          < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
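/* A worked instance (added for illustration): called on
   (and (ior A B) (not B)) with N == 0, this forms
   (ior (and A (not B)) (and B (not B))), which the simplifiers reduce to
   (and A (not B)); the transformed rtx is returned only when set_src_cost
   reports it is actually cheaper than the original.  */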
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
                          unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0 && !side_effects_p (varop))
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    {
      scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
      return
        gen_lowpart
          (mode,
           apply_distributive_law
           (simplify_gen_binary (GET_CODE (varop), varop_mode,
                                 simplify_and_const_int (NULL_RTX, varop_mode,
                                                         XEXP (varop, 0),
                                                         constop),
                                 simplify_and_const_int (NULL_RTX, varop_mode,
                                                         XEXP (varop, 1),
                                                         constop))));
    }

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
        return o1;
      if (o1 == const0_rtx)
        return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
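/* A worked instance of the NEG rule above (added for illustration): with
   VAROP = (neg Y) where Y is known to be 0 or 1 and CONSTOP = 4, the NEG
   yields either 0 or all-ones, so ANDing with the power of two 4 is the
   same as shifting Y left by 2, and the result is (ashift Y 2).  */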
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
                        unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
                             gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);

  return x;
}

/* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.
   We DO care about all the bits in MODE, even if XMODE is smaller than MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
                              scalar_int_mode mode,
                              unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
              && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
           && rsp->last_set_label < label_tick)
          || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && REGNO (x) < reg_n_sets_max
              && REG_N_SETS (REGNO (x)) == 1
              && !REGNO_REG_SET_P
                  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
         of mode, record_value_for_reg invoked nonzero_bits on the register
         with nonzero_bits_mode (because last_set_mode is necessarily integral
         and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
         are all valid, hence in mode too since nonzero_bits_mode is defined
         to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
        tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
        /* We don't know anything about the upper bits.  */
        mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);

      *nonzero &= mask;
    }

  return NULL;
}
/* Given a reg X of mode XMODE, return the number of bits at the high-order
   end of X that are known to be equal to the sign bit.  X will be used
   in mode MODE; the returned value will always be between 1 and the
   number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
                                     scalar_int_mode mode,
                                     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
           && rsp->last_set_label < label_tick)
          || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && REGNO (x) < reg_n_sets_max
              && REG_N_SETS (REGNO (x)) == 1
              && !REGNO_REG_SET_P
                  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}

/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  scalar_int_mode int_mode;
  return (unsignedp
          ? (is_a <scalar_int_mode> (mode, &int_mode)
             && HWI_COMPUTABLE_MODE_P (int_mode)
             ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
                               - floor_log2 (nonzero_bits (x, int_mode)))
             : 0)
          : num_sign_bit_copies (x, mode) - 1);
}
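/* A worked instance (added for illustration): if X is unsigned and known to
   fit in four bits (nonzero_bits == 0xf) in SImode, extended_count returns
   32 - 1 - floor_log2 (0xf) = 28 spare high-order bits.  */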
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 are UNKNOWN, it means no operation is required.  Only
   NEG, PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if
   the proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */

static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
                 enum rtx_code op1, HOST_WIDE_INT const1,
                 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
        {
        case AND:
          const0 &= const1;
          break;
        case IOR:
          const0 |= const1;
          break;
        case XOR:
          const0 ^= const1;
          break;
        case PLUS:
          const0 += const1;
          break;
        case NEG:
          op0 = UNKNOWN;
          break;
        default:
          break;
        }
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
        if (op1 == AND)
          /* (a & b) | b == b */
          op0 = SET;
        else /* op1 == XOR */
          /* (a ^ b) | b == a | b */
          ;
        break;

      case XOR:
        if (op1 == AND)
          /* (a & b) ^ b == (~a) & b */
          op0 = AND, *pcomp_p = 1;
        else /* op1 == IOR */
          /* (a | b) ^ b == a & ~b */
          op0 = AND, const0 = ~const0;
        break;

      case AND:
        if (op1 == IOR)
          /* (a | b) & b == b */
          op0 = SET;
        else /* op1 == XOR */
          /* (a ^ b) & b == (~a) & b */
          *pcomp_p = 1;
        break;
      default:
        break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
           && op0 == AND)
    op0 = UNKNOWN;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  *pop0 = op0;
  return 1;
}
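/* A worked instance (added for illustration): merging a pending outermost
   XOR of B with a new inner AND of the same B uses the identity
   (a & b) ^ b == (~a) & b from the table above, so *POP0 becomes AND and
   *PCOMP_P is set to request complementing the innermost operand.  */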
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static scalar_int_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
                      scalar_int_mode orig_mode, scalar_int_mode mode,
                      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
         to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
          > (unsigned) (GET_MODE_PRECISION (mode)
                        - GET_MODE_PRECISION (orig_mode)))
        return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
        return mode;

      /* We can also widen if the bits brought in will be masked off.  This
         operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
        {
          int care_bits = low_bitmask_len (orig_mode, outer_const);

          if (care_bits >= 0
              && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
            return mode;
        }
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
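/* A worked instance (added for illustration): widening (lshiftrt:QI OP 3)
   to SImode under an outer (and ... 0x1f) is allowed because
   low_bitmask_len (QImode, 0x1f) is 5 and 8 - 5 >= 3, i.e. every bit the
   wider shift brings in is masked off again in ORIG_MODE.  */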
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
                        rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count, log2;
  machine_mode mode = result_mode;
  machine_mode shift_mode;
  scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure to truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
        return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
        {
          unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
          code = ROTATE;
          count = bitsize - count;
        }

      shift_mode = result_mode;
      if (shift_mode != mode)
        {
          /* We only change the modes of scalar shifts.  */
          int_mode = as_a <scalar_int_mode> (mode);
          int_result_mode = as_a <scalar_int_mode> (result_mode);
          shift_mode = try_widen_shift_mode (code, varop, count,
                                             int_result_mode, int_mode,
                                             outer_op, outer_const);
        }

      scalar_int_mode shift_unit_mode
        = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));

      /* Handle cases where the count is greater than the size of the mode
         minus 1.  For ASHIFT, use the size minus one as the count (this can
         occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
         take the count modulo the size.  For other shifts, the result is
         zero.

         Since these shifts are being produced by the compiler by combining
         multiple operations, each of which are defined, we know what the
         result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
        {
          if (code == ASHIFTRT)
            count = GET_MODE_PRECISION (shift_unit_mode) - 1;
          else if (code == ROTATE || code == ROTATERT)
            count %= GET_MODE_PRECISION (shift_unit_mode);
          else
            {
              /* We can't simply return zero because there may be an
                 outer op.  */
              varop = const0_rtx;
              count = 0;
              break;
            }
        }

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
         here would cause an infinite loop.  */
      if (complement_p)
        break;

      if (shift_mode == shift_unit_mode)
        {
          /* An arithmetic right shift of a quantity known to be -1 or 0
             is a no-op.  */
          if (code == ASHIFTRT
              && (num_sign_bit_copies (varop, shift_unit_mode)
                  == GET_MODE_PRECISION (shift_unit_mode)))
            {
              count = 0;
              break;
            }

          /* If we are doing an arithmetic right shift and discarding all but
             the sign bit copies, this is equivalent to doing a shift by the
             bitsize minus one.  Convert it into that shift because it will
             often allow other simplifications.  */

          if (code == ASHIFTRT
              && (count + num_sign_bit_copies (varop, shift_unit_mode)
                  >= GET_MODE_PRECISION (shift_unit_mode)))
            count = GET_MODE_PRECISION (shift_unit_mode) - 1;

          /* We simplify the tests below and elsewhere by converting
             ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
             `make_compound_operation' will convert it to an ASHIFTRT for
             those machines (such as VAX) that don't have an LSHIFTRT.  */
          if (code == ASHIFTRT
              && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
              && val_signbit_known_clear_p (shift_unit_mode,
                                            nonzero_bits (varop,
                                                          shift_unit_mode)))
            code = LSHIFTRT;

          if (((code == LSHIFTRT
                && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
                && !(nonzero_bits (varop, shift_unit_mode) >> count))
               || (code == ASHIFT
                   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
                   && !((nonzero_bits (varop, shift_unit_mode) << count)
                        & GET_MODE_MASK (shift_unit_mode))))
              && !side_effects_p (varop))
            varop = const0_rtx;
        }

      switch (GET_CODE (varop))
        {
        case SIGN_EXTEND:
        case ZERO_EXTEND:
        case SIGN_EXTRACT:
        case ZERO_EXTRACT:
          new_rtx = expand_compound_operation (varop);
          if (new_rtx != varop)
            {
              varop = new_rtx;
              continue;
            }
          break;

        case MEM:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_mode = as_a <scalar_int_mode> (mode);

          /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
             minus the width of a smaller mode, we can do this with a
             SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
          if ((code == ASHIFTRT || code == LSHIFTRT)
              && ! mode_dependent_address_p (XEXP (varop, 0),
                                             MEM_ADDR_SPACE (varop))
              && ! MEM_VOLATILE_P (varop)
              && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
                  .exists (&tmode)))
            {
              new_rtx = adjust_address_nv (varop, tmode,
                                           BYTES_BIG_ENDIAN ? 0
                                           : count / BITS_PER_UNIT);
              varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
                                     : ZERO_EXTEND, int_mode, new_rtx);
              count = 0;
              continue;
            }
          break;

        case SUBREG:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_mode = as_a <scalar_int_mode> (mode);
          int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

          /* If VAROP is a SUBREG, strip it as long as the inner operand has
             the same number of words as what we've seen so far.  Then store
             the widest mode in MODE.  */
          if (subreg_lowpart_p (varop)
              && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
              && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
              && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
                  == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
              && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
            {
              varop = SUBREG_REG (varop);
              if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
                mode = inner_mode;
              continue;
            }
          break;

        case MULT:
          /* Some machines use MULT instead of ASHIFT because MULT
             is cheaper.  But it is still better on those machines to
             merge two shifts into one.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
            {
              rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
              varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
                                           XEXP (varop, 0), log2_rtx);
              continue;
            }
          break;

        case UDIV:
          /* Similar, for when divides are cheaper.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
            {
              rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
              varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
                                           XEXP (varop, 0), log2_rtx);
              continue;
            }
          break;

        case ASHIFTRT:
          /* If we are extracting just the sign bit of an arithmetic
             right shift, that shift is not needed.  However, the sign
             bit of a wider mode may be different from what would be
             interpreted as the sign bit in a narrower mode, so, if
             the result is narrower, don't discard the shift.  */
          if (code == LSHIFTRT
              && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
              && (GET_MODE_UNIT_BITSIZE (result_mode)
                  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
            {
              varop = XEXP (varop, 0);
              continue;
            }
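          /* A worked instance of the MULT rule above (added for
             illustration): (lshiftrt:SI (mult:SI X 8) 3) first becomes
             (lshiftrt:SI (ashift:SI X 3) 3), which the nested-shift code
             below then reduces to (and:SI X 0x1fffffff).  */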
          /* fall through */

        case LSHIFTRT:
        case ASHIFT:
        case ROTATE:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_mode = as_a <scalar_int_mode> (mode);
          int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
          int_result_mode = as_a <scalar_int_mode> (result_mode);

          /* Here we have two nested shifts.  The result is usually the
             AND of a new shift with a mask.  We compute the result below.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && INTVAL (XEXP (varop, 1)) >= 0
              && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
              && HWI_COMPUTABLE_MODE_P (int_result_mode)
              && HWI_COMPUTABLE_MODE_P (int_mode))
            {
              enum rtx_code first_code = GET_CODE (varop);
              unsigned int first_count = INTVAL (XEXP (varop, 1));
              unsigned HOST_WIDE_INT mask;
              rtx mask_rtx;

              /* We have one common special case.  We can't do any merging if
                 the inner code is an ASHIFTRT of a smaller mode.  However, if
                 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
                 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
                 we can convert it to
                 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
                 This simplifies certain SIGN_EXTEND operations.  */
              if (code == ASHIFT && first_code == ASHIFTRT
                  && count == (GET_MODE_PRECISION (int_result_mode)
                               - GET_MODE_PRECISION (int_varop_mode)))
                {
                  /* C3 has the low-order C1 bits zero.  */

                  mask = GET_MODE_MASK (int_mode)
                         & ~((HOST_WIDE_INT_1U << first_count) - 1);

                  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
                                                  XEXP (varop, 0), mask);
                  varop = simplify_shift_const (NULL_RTX, ASHIFT,
                                                int_result_mode, varop, count);
                  count = first_count;
                  code = ASHIFTRT;
                  continue;
                }

              /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
                 than C1 high-order bits equal to the sign bit, we can convert
                 this to either an ASHIFT or an ASHIFTRT depending on the
                 two counts.

                 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */

              if (code == ASHIFTRT && first_code == ASHIFT
                  && int_varop_mode == shift_unit_mode
                  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
                      > first_count))
                {
                  varop = XEXP (varop, 0);
                  count -= first_count;
                  if (count < 0)
                    {
                      count = -count;
                      code = ASHIFT;
                    }

                  continue;
                }

              /* There are some cases we can't do.  If CODE is ASHIFTRT,
                 we can only do this if FIRST_CODE is also ASHIFTRT.

                 We can't do the case when CODE is ROTATE and FIRST_CODE is
                 ASHIFTRT.

                 If the mode of this shift is not the mode of the outer shift,
                 we can't do this if either shift is a right shift or ROTATE.

                 Finally, we can't do any of these if the mode is too wide
                 unless the codes are the same.

                 Handle the case where the shift codes are the same
                 first.  */

              if (code == first_code)
                {
                  if (int_varop_mode != int_result_mode
                      && (code == ASHIFTRT || code == LSHIFTRT
                          || code == ROTATE))
                    break;

                  count += first_count;
                  varop = XEXP (varop, 0);
                  continue;
                }

              if (code == ASHIFTRT
                  || (code == ROTATE && first_code == ASHIFTRT)
                  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
                  || (int_varop_mode != int_result_mode
                      && (first_code == ASHIFTRT || first_code == LSHIFTRT
                          || first_code == ROTATE
                          || code == ROTATE)))
                break;

              /* To compute the mask to apply after the shift, shift the
                 nonzero bits of the inner shift the same way the
                 outer shift will.  */

              mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
                                       int_result_mode);
              rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
              mask_rtx
                = simplify_const_binary_operation (code, int_result_mode,
                                                   mask_rtx, count_rtx);

              /* Give up if we can't compute an outer operation to use.  */
              if (mask_rtx == 0
                  || !CONST_INT_P (mask_rtx)
                  || ! merge_outer_ops (&outer_op, &outer_const, AND,
                                        INTVAL (mask_rtx),
                                        int_result_mode, &complement_p))
                break;

              /* If the shifts are in the same direction, we add the
                 counts.  Otherwise, we subtract them.  */
              if ((code == ASHIFTRT || code == LSHIFTRT)
                  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
                count += first_count;
              else
                count -= first_count;

              /* If COUNT is positive, the new shift is usually CODE,
                 except for the two exceptions below, in which case it is
                 FIRST_CODE.  If the count is negative, FIRST_CODE should
                 always be used.  */
              if (count > 0
                  && ((first_code == ROTATE && code == ASHIFT)
                      || (first_code == ASHIFTRT && code == LSHIFTRT)))
                code = first_code;
              else if (count < 0)
                code = first_code, count = -count;

              varop = XEXP (varop, 0);
              continue;
            }
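          /* A worked instance of the nested-shift merging above (added for
             illustration): for (ashift:SI (lshiftrt:SI X 8) 8) the two
             counts cancel, the mask 0xffffff00 migrates to the outer
             operation, and the result is (and:SI X 0xffffff00).  */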
          /* If we have (A << B << C) for any shift, we can convert this to
             (A << C << B).  This wins if A is a constant.  Only try this if
             B is not a constant.  */

          else if (GET_CODE (varop) == code
                   && CONST_INT_P (XEXP (varop, 0))
                   && !CONST_INT_P (XEXP (varop, 1)))
            {
              /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
                 sure the result will be masked.  See PR70222.  */
              if (code == LSHIFTRT
                  && int_mode != int_result_mode
                  && !merge_outer_ops (&outer_op, &outer_const, AND,
                                       GET_MODE_MASK (int_result_mode)
                                       >> orig_count, int_result_mode,
                                       &complement_p))
                break;
              /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
                 up outer sign extension (often left and right shift) is
                 hardly more efficient than the original.  See PR70429.  */
              if (code == ASHIFTRT && int_mode != int_result_mode)
                break;

              rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
              rtx new_rtx = simplify_const_binary_operation (code, int_mode,
                                                             XEXP (varop, 0),
                                                             count_rtx);
              varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
              count = 0;
              continue;
            }
          break;

        case NOT:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* Make this fit the case below.  */
          varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
          continue;

        case IOR:
        case AND:
        case XOR:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
          int_result_mode = as_a <scalar_int_mode> (result_mode);

          /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
             with C the size of VAROP - 1 and the shift is logical if
             STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
             we have an (le X 0) operation.  If we have an arithmetic shift
             and STORE_FLAG_VALUE is 1 or we have a logical shift with
             STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

          if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
              && XEXP (XEXP (varop, 0), 1) == constm1_rtx
              && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
              && (code == LSHIFTRT || code == ASHIFTRT)
              && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
              && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
            {
              count = 0;
              varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
                                  const0_rtx);

              if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
                varop = gen_rtx_NEG (int_varop_mode, varop);

              continue;
            }

          /* If we have (shift (logical)), move the logical to the outside
             to allow it to possibly combine with another logical and the
             shift to combine with another shift.  This also canonicalizes to
             what a ZERO_EXTRACT looks like.  Also, some machines have
             (and (shift)) insns.  */

          if (CONST_INT_P (XEXP (varop, 1))
              /* We can't do this if we have (ashiftrt (xor)) and the
                 constant has its sign bit set in shift_unit_mode with
                 shift_unit_mode wider than result_mode.  */
              && !(code == ASHIFTRT && GET_CODE (varop) == XOR
                   && int_result_mode != shift_unit_mode
                   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
                                          shift_unit_mode) < 0)
              && (new_rtx = simplify_const_binary_operation
                  (code, int_result_mode,
                   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
                   gen_int_shift_amount (int_result_mode, count))) != 0
              && CONST_INT_P (new_rtx)
              && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
                                  INTVAL (new_rtx), int_result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* If we can't do that, try to simplify the shift in each arm of the
             logical expression, make a new logical expression, and apply
             the inverse distributive law.  This also can't be done for
             (ashiftrt (xor)) where we've widened the shift and the constant
             changes the sign bit.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && !(code == ASHIFTRT && GET_CODE (varop) == XOR
                   && int_result_mode != shift_unit_mode
                   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
                                          shift_unit_mode) < 0))
            {
              rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
                                              XEXP (varop, 0), count);
              rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
                                              XEXP (varop, 1), count);

              varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
                                           lhs, rhs);
              varop = apply_distributive_law (varop);

              count = 0;
              continue;
            }
          break;
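          /* A worked instance of the move-logical-out rule above (added for
             illustration): (lshiftrt:SI (and:SI X 0xf0) 4) shifts the
             constant as well, so the AND migrates to the outer operation and
             the whole expression becomes (and:SI (lshiftrt:SI X 4) 0xf).  */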
11108 /* The following rules apply only to scalars. */
11109 if (shift_mode
!= shift_unit_mode
)
11111 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
11113 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11114 says that the sign bit can be tested, FOO has mode MODE, C is
11115 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11116 that may be nonzero. */
11117 if (code
== LSHIFTRT
11118 && XEXP (varop
, 1) == const0_rtx
11119 && GET_MODE (XEXP (varop
, 0)) == int_result_mode
11120 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
11121 && HWI_COMPUTABLE_MODE_P (int_result_mode
)
11122 && STORE_FLAG_VALUE
== -1
11123 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1
11124 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1,
11125 int_result_mode
, &complement_p
))
11127 varop
= XEXP (varop
, 0);
	case NEG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
	     than the number of bits in the mode is equivalent to A.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
	     NEG outside to allow shifts to combine.  */
	  if (code == ASHIFT
	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
				  int_result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;
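	  /* Sketch, assuming SImode: if A is known to be 0 or 1, (neg A) is
	     0 or -1, whose bit 31 equals A, so (lshiftrt (neg A) 31) == A.
	     Likewise (ashift (neg X) 2) == (neg (ashift X 2)), both being
	     -(X << 2).  */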
	case PLUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
	     is one less than the number of bits in the mode is
	     equivalent to (xor A 1).  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && XEXP (varop, 1) == constm1_rtx
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
				  int_result_mode, &complement_p))
	    {
	      count = 0;
	      varop = XEXP (varop, 0);
	      continue;
	    }
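	  /* Sketch, assuming SImode: for A in {0, 1}, A - 1 is 0 or -1 and
	     its sign bit is the complement of A, so
	     (lshiftrt (plus A -1) 31) == (xor A 1).  */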
	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and those
	     bits are known zero in FOO, we can replace the PLUS with FOO.
	     Similarly in the other operand order.  This code occurs when
	     we are computing the size of a variable-size array.  */

	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && count < HOST_BITS_PER_WIDE_INT
	      && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
	      && (nonzero_bits (XEXP (varop, 1), int_result_mode)
		  & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  else if ((code == ASHIFTRT || code == LSHIFTRT)
		   && count < HOST_BITS_PER_WIDE_INT
		   && HWI_COMPUTABLE_MODE_P (int_result_mode)
		   && (nonzero_bits (XEXP (varop, 0), int_result_mode)
		       >> count) == 0
		   && (nonzero_bits (XEXP (varop, 0), int_result_mode)
		       & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
	    {
	      varop = XEXP (varop, 1);
	      continue;
	    }
	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
	  if (code == ASHIFT
	      && CONST_INT_P (XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (ASHIFT, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   gen_int_shift_amount (int_result_mode, count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
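	  /* Sketch: (ashift (plus X 3) 2) == (plus (ashift X 2) 12),
	     because (X + 3) << 2 == (X << 2) + (3 << 2); the PLUS moves to
	     the outer operation with the constant C' = C << N.  */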
	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical).  See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && CONST_INT_P (XEXP (varop, 1))
	      && mode_signbit_p (int_result_mode, XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   gen_int_shift_amount (int_result_mode, count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  break;
	case MINUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */

	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }
	  break;
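	  /* Sketch, assuming SImode: (ashiftrt X 31) is 0 for X >= 0 and -1
	     for X < 0, so ((X >> 31) - X) has its sign bit set exactly when
	     X > 0; extracting that bit yields (gt X 0), negated or not
	     depending on the shift kind and STORE_FLAG_VALUE.  */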
	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);
	      int new_count = count + INTVAL (XEXP (varop_inner, 1));
	      rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
							new_count);
	      varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
					      XEXP (varop_inner, 0),
					      new_count_rtx);
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
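  /* Sketch: (lshiftrt (truncate:SI (lshiftrt:DI X 32)) 3) becomes
     (truncate:SI (lshiftrt:DI X 35)); the inner shift already discarded
     every bit the truncation could affect, so the two shift counts simply
     add.  */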
  shift_mode = result_mode;
  if (shift_mode != mode)
    {
      /* We only change the modes of scalar shifts.  */
      int_mode = as_a <scalar_int_mode> (mode);
      int_result_mode = as_a <scalar_int_mode> (result_mode);
      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
					 int_mode, outer_op, outer_const);
    }
  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;
  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop,
			     gen_int_shift_amount (shift_mode, count));
  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    /* We only change the modes of scalar shifts.  */
    x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
				x, GET_MODE_MASK (result_mode) >> orig_count);
  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      int_result_mode = as_a <scalar_int_mode> (result_mode);

      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, int_result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
      else
	x = simplify_gen_binary (outer_op, int_result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop,
			     gen_int_shift_amount (GET_MODE (varop), count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
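/* Usage sketch (hypothetical call, not taken from this file):
   simplify_shift_const (NULL_RTX, LSHIFTRT, SImode, reg, 2) returns either
   a simplified equivalent of reg >> 2 or, failing that, a freshly built
   (lshiftrt reg 2) converted to SImode.  */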
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;
  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;
  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }
  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;
  /* Recognize all noop sets; these will be killed by a followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }
  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */

static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      scalar_int_mode mode, inner_mode;
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
	continue;
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (inner_mode) - size - start;

	  if (start != 0)
	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
				  gen_int_shift_amount (inner_mode, start));
	  else
	    x = XEXP (x, 0);

	  if (mode != inner_mode)
	    {
	      if (REG_P (x) && HARD_REGISTER_P (x)
		  && !can_change_dest_mode (x, 0, mode))
		continue;

	      x = gen_lowpart_SUBREG (mode, x);
	    }
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
	       && !paradoxical_subreg_p (XEXP (x, 0))
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = SUBREG_REG (XEXP (x, 0));
	  if (GET_MODE (x) != mode)
	    {
	      if (REG_P (x) && HARD_REGISTER_P (x)
		  && !can_change_dest_mode (x, 0, mode))
		continue;

	      x = gen_lowpart_SUBREG (mode, x);
	    }
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0))
	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      if (!(GET_CODE (x) == LSHIFTRT
	    && CONST_INT_P (XEXP (x, 1))
	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
	{
	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
	}

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);
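  /* Sketch of the source-side rewrite above, assuming little-endian bit
     numbering in SImode: (zero_extract:SI R 8 4) becomes
     (and (lshiftrt R 4) 0xff), and (zero_extend:SI (subreg:QI R 0))
     becomes (and R 0xff).  */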
  rtx *dst = &SET_DEST (pat);
  scalar_int_mode mode;
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
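/* Sketch of change_zero_ext's destination rewrite, assuming little-endian
   bit numbering in SImode: (set (zero_extract:SI R 8 4) SRC) becomes
   (set R (ior (and R 0xfffff00f) (and (ashift SRC 4) 0xff0))), an
   explicit read-modify-write of the 8-bit field at offset 4.  */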
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
	undo_to_marker (marker);
    }

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */

static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
      && ! (CONST_SCALAR_INT_P (x)
	    || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (paradoxical_subreg_p (omode, imode))
	return gen_rtx_SUBREG (omode, x, 0);

      poly_int64 offset = byte_lowpart_offset (omode, imode);
      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode).require ();
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */

static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  scalar_int_mode int_mode;
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
      && (nonzero_bits (op0, int_mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }
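  /* Sketch: if OP0 is (and X 8), its only possibly nonzero bit is bit 3,
     so "(and X 8) == 8" becomes "(and X 8) != 0" and the unsigned
     "(and X 8) < 8" becomes "(and X 8) == 0", with the constant reduced
     to zero.  */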
  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == ((HOST_WIDE_INT_1U
			<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == (HOST_WIDE_INT_1U
		       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
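/* Usage sketch (hypothetical operands): with SImode and *POP1 equal to
   0x80000000, simplify_compare_const (LTU, SImode, op0, pop1) rewrites
   the unsigned test "op0 < 0x80000000" as the signed test "op0 >= 0",
   returning GE with *POP1 replaced by zero.  */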
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  scalar_int_mode mode, inner_mode, tmode;
  opt_scalar_int_mode tmode_iter;
  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
	  && (is_a <scalar_int_mode>
	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (mode)
		  - GET_MODE_PRECISION (inner_mode))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}
      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					GET_MODE (SUBREG_REG (inner_op0)))) == 0
	      && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					GET_MODE (SUBREG_REG (inner_op1)))) == 0)
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    FOR_EACH_MODE_UNTIL (tmode,
				 as_a <scalar_int_mode> (GET_MODE (op0)))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (! changed)
	    break;
	}
      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }

  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }
  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */

  while (CONST_INT_P (op1))
    {
      machine_mode raw_mode = GET_MODE (op0);
      scalar_int_mode int_mode;
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (raw_mode) != MODE_INT
	  && ! (raw_mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, raw_mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);
      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p
	  && is_a <scalar_int_mode> (raw_mode, &int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	op0 = force_to_mode (op0, int_mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (int_mode) - 1),
			     0);
))
12252 /* We can't do anything if OP0 is a condition code value, rather
12253 than an actual data value. */
12255 || CC0_P (XEXP (op0
, 0))
12256 || GET_MODE_CLASS (GET_MODE (XEXP (op0
, 0))) == MODE_CC
)
12259 /* Get the two operands being compared. */
12260 if (GET_CODE (XEXP (op0
, 0)) == COMPARE
)
12261 tem
= XEXP (XEXP (op0
, 0), 0), tem1
= XEXP (XEXP (op0
, 0), 1);
12263 tem
= XEXP (op0
, 0), tem1
= XEXP (op0
, 1);
12265 /* Check for the cases where we simply want the result of the
12266 earlier test or the opposite of that result. */
12267 if (code
== NE
|| code
== EQ
12268 || (val_signbit_known_set_p (raw_mode
, STORE_FLAG_VALUE
)
12269 && (code
== LT
|| code
== GE
)))
12271 enum rtx_code new_code
;
12272 if (code
== LT
|| code
== NE
)
12273 new_code
= GET_CODE (op0
);
12275 new_code
= reversed_comparison_code (op0
, NULL
);
12277 if (new_code
!= UNKNOWN
)
12288 if (raw_mode
== VOIDmode
)
12290 scalar_int_mode mode
= as_a
<scalar_int_mode
> (raw_mode
);
      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* Fall through.  */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;
	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;
	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
		      == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;
	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }
	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be
		  all 0s or all 1s.

	       2. The mode extension results in the sign bit being copied on
		  each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
					 &inner_mode)
	      && GET_MODE_PRECISION (inner_mode) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		      < HOST_WIDE_INT_1U << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && (((nonzero_bits (a, inner_mode)
			 & ~GET_MODE_MASK (mode)) == 0
			&& const_op >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		       < HOST_WIDE_INT_1U << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }
	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (paradoxical_subreg_p (op0))
	    ;
	  else if (subreg_lowpart_p (op0)
		   && GET_MODE_CLASS (mode) == MODE_INT
		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
		   && (code == NE || code == EQ)
		   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
		   && !paradoxical_subreg_p (op0)
		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		       & ~GET_MODE_MASK (mode)) == 0)
	    {
	      /* Remove outer subregs that don't do anything.  */
	      tem = gen_lowpart (inner_mode, op1);

	      if ((nonzero_bits (tem, inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
		{
		  op0 = SUBREG_REG (op0);
		  op1 = tem;
		  continue;
		}
	      break;
	    }
	  else
	    break;

	  /* Fall through.  */

	case ZERO_EXTEND:
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (MINUS, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;
	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (PLUS, mode,
						   XEXP (op0, 1), op1)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (MINUS, mode,
						   XEXP (op0, 0), op1)) != 0)
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;
	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (XOR, mode,
						   XEXP (op0, 1), op1)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;
	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;
	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }
	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }
	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }
	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	       (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	       -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && int_mode_for_size (i, 1).exists (&tmode))
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }
	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if ((is_a <scalar_int_mode>
		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND does no longer mask the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }
	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }
	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			       << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;
	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }
	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;
	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }
	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }
	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* Fall through.  */

	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op = (unsigned HOST_WIDE_INT) const_op
			      << INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }
	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
	In that case we know those bits are zeros or ones.  We must also be
	sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);
13005 if (GET_CODE (op0
) == SUBREG
&& subreg_lowpart_p (op0
)
13006 && is_int_mode (GET_MODE (op0
), &mode
)
13007 && is_int_mode (GET_MODE (SUBREG_REG (op0
)), &inner_mode
)
13008 && (code
== NE
|| code
== EQ
))
13010 if (paradoxical_subreg_p (op0
))
13012 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
13014 if (REG_P (SUBREG_REG (op0
)))
13016 op0
= SUBREG_REG (op0
);
13017 op1
= gen_lowpart (inner_mode
, op1
);
13020 else if (GET_MODE_PRECISION (inner_mode
) <= HOST_BITS_PER_WIDE_INT
13021 && (nonzero_bits (SUBREG_REG (op0
), inner_mode
)
13022 & ~GET_MODE_MASK (mode
)) == 0)
13024 tem
= gen_lowpart (inner_mode
, op1
);
13026 if ((nonzero_bits (tem
, inner_mode
) & ~GET_MODE_MASK (mode
)) == 0)
13027 op0
= SUBREG_REG (op0
), op1
= tem
;
  /* We now do the opposite procedure: some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
	tmode = tmode_iter.require ();
	if (!HWI_COMPUTABLE_MODE_P (tmode))
	  break;
	if (have_insn_for (COMPARE, tmode))
	  {
	    int zero_extended;

	    /* If this is a test for negative, we can make an explicit
	       test of the sign bit.  Test this first so we can use
	       a paradoxical subreg to extend OP0.  */

	    if (op1 == const0_rtx && (code == LT || code == GE)
		&& HWI_COMPUTABLE_MODE_P (mode))
	      {
		unsigned HOST_WIDE_INT sign
		  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode, op0),
					   gen_int_mode (sign, tmode));
		code = (code == LT) ? NE : EQ;
		break;
	      }

	    /* If the only nonzero bits in OP0 and OP1 are those in the
	       narrower mode and this is an equality or unsigned comparison,
	       we can use the wider mode.  Similarly for sign-extended
	       values, in which case it is true for all comparisons.  */
	    zero_extended = ((code == EQ || code == NE
			      || code == GEU || code == GTU
			      || code == LEU || code == LTU)
			     && (nonzero_bits (op0, tmode)
				 & ~GET_MODE_MASK (mode)) == 0
			     && ((CONST_INT_P (op1)
				  || (nonzero_bits (op1, tmode)
				      & ~GET_MODE_MASK (mode)) == 0)));

	    if (zero_extended
		|| ((num_sign_bit_copies (op0, tmode)
		     > (unsigned int) (GET_MODE_PRECISION (tmode)
				       - GET_MODE_PRECISION (mode)))
		    && (num_sign_bit_copies (op1, tmode)
			> (unsigned int) (GET_MODE_PRECISION (tmode)
					  - GET_MODE_PRECISION (mode)))))
	      {
		/* If OP0 is an AND and we don't have an AND in MODE either,
		   make a new AND in the proper mode.  */
		if (GET_CODE (op0) == AND
		    && !have_insn_for (AND, mode))
		  op0 = simplify_gen_binary (AND, tmode,
					     gen_lowpart (tmode,
							  XEXP (op0, 0)),
					     gen_lowpart (tmode,
							  XEXP (op0, 1)));

		if (zero_extended)
		  {
		    op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op1, mode);
		  }
		else
		  {
		    op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op1, mode);
		  }
		break;
	      }
	  }
      }

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x == XEXP (x0, 0)
			     ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpression we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.  */
      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > param_max_last_value_rtl)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      if (insn)
	rsp->last_set = insn;
      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}
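/* For example, when recording "r = r + 1" we cannot store a value that
   mentions R itself, so the code above first substitutes R's previous
   value: if R was last set from (reg S), the recorded value becomes
   (plus (reg S) (const_int 1)); failing that, a (clobber (const_int 0))
   stands for "value unknown".  */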
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle a SUBREG if it's
	 the low part, but we must be careful with paradoxical SUBREGs on
	 RISC architectures because we cannot strip e.g. an extension around
	 a load and record the naked load since the RTL middle-end considers
	 that the upper bits are defined according to LOAD_EXTEND_OP.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
			    BITS_PER_WORD)
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      WORD_REGISTER_OPERATIONS
			      && word_register_operation_p (SET_SRC (setter))
			      && paradoxical_subreg_p (SET_DEST (setter))
			      ? SET_SRC (setter)
			      : gen_lowpart (GET_MODE (dest),
					     SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      HARD_REG_SET callee_clobbers
	= insn_callee_abi (insn).full_and_partial_reg_clobbers ();
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  /* ??? We could try to preserve some information from the last
	     set of register I if the call doesn't actually clobber
	     (reg:last_set_mode I), which might be true for ABIs with
	     partial clobbers.  However, it would be difficult to
	     update last_set_nonzero_bits and last_sign_bit_copies
	     to account for the part of I that actually was clobbered.
	     It wouldn't help much anyway, since we rarely see this
	     situation before RA.  */
	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (insn, record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (insn, record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (!HWI_COMPUTABLE_MODE_P (mode))
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno
	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
	{
	  links = links->next;
	  continue;
	}

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
	{
	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
	}

      if (REG_P (SET_SRC (set)))
	{
	  regno = REGNO (SET_SRC (set));
	  links = LOG_LINKS (insn);
	}
      else
	break;
    }
}
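/* E.g. if (subreg:QI (reg:SI N) 0) is marked SUBREG_PROMOTED_UNSIGNED_P,
   the nonzero-bits mask recorded for N is narrowed to 0xff here, so the
   zero-extension property survives once the promoted SUBREG itself has
   been combined away.  */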
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (!partial_subreg_p (mode, truncated))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
	return true;

      truncated_mode = GET_MODE (x);
      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
	return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
	switch (format[i])
	  {
	  case 'e':
	    check_promoted_subreg (insn, XEXP (x, i));
	    break;
	  case 'V':
	  case 'E':
	    if (XVEC (x, i) != 0)
	      for (j = 0; j < XVECLEN (x, i); j++)
		check_promoted_subreg (insn, XVECEXP (x, i, j));
	    break;
	  }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
	{
	  reg_stat_type *rsp = &reg_stat[j];
	  if (rsp->last_set_invalid
	      /* If this is a pseudo-register that was only set once and not
		 live at the beginning of the function, it is always valid.  */
	      || (! (regno >= FIRST_PSEUDO_REGISTER
		     && regno < reg_n_sets_max
		     && REG_N_SETS (regno) == 1
		     && (!REGNO_REG_SET_P
			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
			  regno)))
		  && rsp->last_set_label > tick))
	    {
	      if (replace)
		*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
	      return replace;
	    }
	}

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  /* Check for identical subexpressions.  If x contains
	     identical subexpression we only have to traverse one of
	     them.  */
	  if (i == 1 && ARITHMETIC_P (x))
	    {
	      /* Note that at this point x0 has already been checked
		 and found valid.  */
	      rtx x0 = XEXP (x, 0);
	      rtx x1 = XEXP (x, 1);

	      /* If x0 and x1 are identical then x is also valid.  */
	      if (x0 == x1)
		return 1;

	      /* If x1 is identical to a subexpression of x0 then
		 while checking x0, x1 has already been checked.  Thus
		 it is valid and so is x.  */
	      if (ARITHMETIC_P (x0)
		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
		return 1;

	      /* If x0 is identical to a subexpression of x1 then x is
		 valid iff the rest of x1 is valid.  */
	      if (ARITHMETIC_P (x1)
		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
		return
		  get_last_value_validate (&XEXP (x1,
						  x0 == XEXP (x1, 0) ? 1 : 0),
					   insn, tick, replace);
	    }

	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
				       replace) == 0)
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (get_last_value_validate (&XVECEXP (x, i, j),
				       insn, tick, replace) == 0)
	    return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
	  && (regno < FIRST_PSEUDO_REGISTER
	      || regno >= reg_n_sets_max
	      || REG_N_SETS (regno) != 1
	      || REGNO_REG_SET_P
		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
		GET_MODE_PRECISION (GET_MODE (x))))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
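/* For instance, after "(set (reg R) (ashiftrt (reg S) (const_int 31)))"
   earlier in the block, get_last_value for R returns that ASHIFTRT, letting
   nonzero_bits and num_sign_bit_copies see that R is either -1 or 0; if S
   has since been clobbered, the returned copy shows it as
   (clobber (const_int 0)).  */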
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
static rtx reg_dead_reg;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}

/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);
  reg_dead_reg = reg;

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
	  return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
	    return 1;

	  note_stores (insn, reg_dead_at_p_1, NULL);
	  if (reg_dead_flag)
	    return reg_dead_flag == 1 ? 1 : 0;

	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
	    return 1;
	}

      if (insn == BB_HEAD (block))
	break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
	 address as used.  */
      if (MEM_P (XEXP (x, 0)))
	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* None of this applies to the stack, frame or arg pointers.  */
	  if (regno == STACK_POINTER_REGNUM
	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
		  && regno == HARD_FRAME_POINTER_REGNUM)
	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
	      || regno == FRAME_POINTER_REGNUM)
	    return;

	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
	}
      return;

    case SET:
      {
	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
	   the address.  */
	rtx testreg = SET_DEST (x);

	while (GET_CODE (testreg) == SUBREG
	       || GET_CODE (testreg) == ZERO_EXTRACT
	       || GET_CODE (testreg) == STRICT_LOW_PART)
	  testreg = XEXP (testreg, 0);

	if (MEM_P (testreg))
	  mark_used_regs_combine (XEXP (testreg, 0));

	mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  mark_used_regs_combine (XEXP (x, i));
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      mark_used_regs_combine (XVECEXP (x, i, j));
	  }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
	     rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* If we do not know where the register died, it may still die between
	 FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
      if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
	{
	  rtx_insn *insn = prev_real_nondebug_insn (to_insn);
	  while (insn
		 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
		 && DF_INSN_LUID (insn) >= from_luid)
	    {
	      if (dead_or_set_regno_p (insn, regno))
		{
		  if (find_regno_note (insn, REG_DEAD, regno))
		    where_dead = insn;
		  break;
		}

	      insn = prev_real_nondebug_insn (insn);
	    }
	}

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
	  && ! reg_referenced_p (x, maybe_kill_insn))
	return;

      if (where_dead
	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
	  && DF_INSN_LUID (where_dead) >= from_luid
	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
	{
	  rtx note = remove_death (regno, where_dead);

	  /* It is possible for the call above to return 0.  This can occur
	     when last_death points to I2 or I1 that we combined with.
	     In that case make a new note.

	     We must also check for the case where X is a hard register
	     and NOTE is a death note for a range of hard registers
	     including X.  In that case, we must put REG_DEAD notes for
	     the remaining registers in place of NOTE.  */

	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
	      && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
	    {
	      unsigned int deadregno = REGNO (XEXP (note, 0));
	      unsigned int deadend = END_REGNO (XEXP (note, 0));
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i;

	      for (i = deadregno; i < deadend; i++)
		if (i < regno || i >= ourend)
		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
	    }

	  /* If we didn't find any note, or if we found a REG_DEAD note that
	     covers only part of the given reg, and we have a multi-reg hard
	     register, then to be safe we must check for REG_DEAD notes
	     for each register other than the first.  They could have
	     their own REG_DEAD notes lying around.  */
	  else if ((note == 0
		    || (note != 0
			&& partial_subreg_p (GET_MODE (XEXP (note, 0)),
					     GET_MODE (x))))
		   && regno < FIRST_PSEUDO_REGISTER
		   && REG_NREGS (x) > 1)
	    {
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i, offset;
	      rtx oldnotes = 0;

	      if (note)
		offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
	      else
		offset = 1;

	      for (i = regno + offset; i < ourend; i++)
		move_deaths (regno_reg_rtx[i],
			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
	    }

	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
	    {
	      XEXP (note, 1) = *pnotes;
	      *pnotes = note;
	    }
	  else
	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
	}

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	 that accesses one word of a multi-word item, some
	 piece of every register in the expression is used by
	 this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
	  || GET_CODE (dest) == STRICT_LOW_PART
	  || (GET_CODE (dest) == SUBREG
	      && !read_modify_subreg_p (dest)))
	{
	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
	  return;
	}

      /* If this is some other SUBREG, we know it replaces the entire
	 value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
	dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
	 For a REG (the only other possibility), the entire value is
	 being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
		     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
			 to_insn, pnotes);
	}
      else if (fmt[i] == 'e')
	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
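/* As an example: if I2 "(set (reg R) (plus (reg A) (reg B)))" carrying a
   REG_DEAD note for A is merged into I3, A now dies at I3 instead, so the
   old note is removed from I2 and a fresh REG_DEAD note for A is queued on
   PNOTES for distribute_notes to place.  */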
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target = 0;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));
      else
	return 0;

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))
	return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
	return 1;

  return 0;
}
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;

	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;

	case REG_ARGS_SIZE:
	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
	     entire adjustment.  Assert i3 contains at least some adjust.  */
	  if (!noop_move_p (i3))
	    {
	      poly_int64 old_size, args_size = get_args_size (note);
	      /* fixup_args_size_notes looks at REG_NORETURN note,
		 so ensure the note is placed there first.  */
	      if (CALL_P (i3))
		{
		  rtx *np;
		  for (np = &next_note; *np; np = &XEXP (*np, 1))
		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
		      {
			rtx n = *np;
			*np = XEXP (n, 1);
			XEXP (n, 1) = REG_NOTES (i3);
			REG_NOTES (i3) = n;
			break;
		      }
		}
	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
	      gcc_assert (maybe_ne (old_size, args_size)
			  || (CALL_P (i3)
			      && !ACCUMULATE_OUTGOING_ARGS
			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
	    }
	  break;

	case REG_NORETURN:
	case REG_SETJMP:
	case REG_TM:
	case REG_CALL_DECL:
	case REG_CALL_NOCF_CHECK:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;

	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for register which were both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }

	  /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
	     but we can't tell which at this point.  We must reset any
	     expectations we had about the value that was previously
	     stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
	     and, if appropriate, restore its previous value, but we
	     don't have enough information for that at this point.  */
	  else
	    {
	      record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

	      /* Otherwise, if this register is now referenced in i2
		 then the register used to be modified in one of the
		 original insns.  If it was i3 (say, in an unused
		 parallel), it's now completely gone, so the note can
		 be discarded.  But if it was modified in i2, i1 or i0
		 and we still reference it in i2, then we're
		 referencing the previous value, and since the
		 register was modified and REG_UNUSED, we know that
		 the previous value is now dead.  So, if we only
		 reference the register in i2, we change the note to
		 REG_DEAD, to reflect the previous value.  However, if
		 we're also setting or clobbering the register as
		 scratch, we know (because the register was not
		 referenced in i3) that it's unused, just as it was
		 unused before, and we place the note in i2.  */
	      if (from_insn != i3 && i2 && INSN_P (i2)
		  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		{
		  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
		    PUT_REG_NOTE_KIND (note, REG_DEAD);
		  if (! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i2, REG_NOTE_KIND (note),
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i2, REG_NOTE_KIND (note),
					  XEXP (note, 0))))
		    place = i2;
		}
	    }
	  break;

	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;

	case REG_INC:
	  /* These notes say something about how a register is used.  They must
	     be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;

	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }
	  break;

	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;

	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */

	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem_insn = from_insn;
	  else
	    {
	      tem_insn = i3;
	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
		{
		  /* If the new I2 sets the same register that is marked
		     dead in the note, we do not in general know where to
		     put the note.  One important case we _can_ handle is
		     when the note comes from I3.  */
		  if (from_insn == i3)
		    place = i3;
		  else
		    break;
		}
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;
	    }

	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem_insn = PREV_INSN (tem_insn); place == 0;
		   tem_insn = PREV_INSN (tem_insn))
		{
		  if (!NONDEBUG_INSN_P (tem_insn))
		    {
		      if (tem_insn == BB_HEAD (bb))
			break;
		      continue;
		    }

		  /* If the register is being set at TEM_INSN, see if that is all
		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
		    {
		      rtx set = single_set (tem_insn);
		      rtx inner_dest = 0;
		      rtx_insn *cc0_setter = NULL;

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 CC0 targets must be careful to maintain setter/user
			 pairs.  If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest)
			  && (!HAVE_cc0
			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
			{
			  /* Move the notes and links of TEM_INSN elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem_insn);

			  PATTERN (tem_insn) = pc_rtx;
			  REG_NOTES (tem_insn) = NULL;

			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem_insn));

			  unsigned int regno = REGNO (XEXP (note, 0));
			  reg_stat_type *rsp = &reg_stat[regno];
			  if (rsp->last_set == tem_insn)
			    record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

			  SET_INSN_DELETED (tem_insn);
			  if (tem_insn == i2)
			    i2 = NULL;

			  /* Delete the setter too.  */
			  if (cc0_setter)
			    {
			      PATTERN (cc0_setter) = pc_rtx;
			      old_notes = REG_NOTES (cc0_setter);
			      REG_NOTES (cc0_setter) = NULL;

			      distribute_notes (old_notes, cc0_setter,
						cc0_setter, NULL,
						NULL_RTX, NULL_RTX, NULL_RTX);
			      distribute_links (LOG_LINKS (cc0_setter));

			      SET_INSN_DELETED (cc0_setter);
			      if (cc0_setter == i2)
				i2 = NULL;
			    }
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem_insn, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem_insn;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
			   || (CALL_P (tem_insn)
			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
		    {
		      place = tem_insn;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  struct insn_link *links = LOG_LINKS (place);
			  LOG_LINKS (place) = NULL;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem_insn == BB_HEAD (bb))
		    break;
		}
	    }

	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not if is it totally replaced,
	     which is what `dead_or_set_p' checks, so also check for it being
	     set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = &reg_stat[regno];

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.]  */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;

	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */

	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
		{
		  unsigned int endregno = END_REGNO (XEXP (note, 0));
		  bool all_used = true;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, PATTERN (place))
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      {
			all_used = false;
			break;
		      }

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs (i, reg_raw_mode[i]))
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, PATTERN (place))
				   && ! find_regno_fusage (place, USE, i))
			    for (tem_insn = PREV_INSN (place); ;
				 tem_insn = PREV_INSN (tem_insn))
			      {
				if (!NONDEBUG_INSN_P (tem_insn))
				  {
				    if (tem_insn == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem_insn, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem_insn)))
				  {
				    add_reg_note (tem_insn, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;

	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;

	  /* Set added_notes_insn to the earliest insn we added a note to.  */
	  if (added_notes_insn == 0
	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
	    added_notes_insn = place;
	}

      if (place2)
	{
	  add_shallow_copy_of_reg_note (place2, note);

	  /* Set added_notes_insn to the earliest insn we added a note to.  */
	  if (added_notes_insn == 0
	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
	    added_notes_insn = place2;
	}
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
	continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
	set = pat;
      else if (GET_CODE (pat) == PARALLEL)
	{
	  int i;
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      set = XVECEXP (pat, 0, i);
	      if (GET_CODE (set) != SET)
		continue;

	      reg = SET_DEST (set);
	      while (GET_CODE (reg) == ZERO_EXTRACT
		     || GET_CODE (reg) == STRICT_LOW_PART
		     || GET_CODE (reg) == SUBREG)
		reg = XEXP (reg, 0);

	      if (!REG_P (reg))
		continue;

	      if (REGNO (reg) == link->regno)
		break;
	    }
	  if (i == XVECLEN (pat, 0))
	    continue;
	}
      else
	continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART
	     || GET_CODE (reg) == SUBREG)
	reg = XEXP (reg, 0);

      if (reg == pc_rtx)
	continue;

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  struct insn_link *link2;

	  FOR_EACH_LOG_LINK (link2, place)
	    if (link2->insn == link->insn && link2->regno == link->regno)
	      break;

	  if (link2 == NULL)
	    {
	      link->next = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
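/* E.g. after a successful three-insn combination, the link that pointed
   from I3 back to I2 is re-homed here: starting at NEXT_INSN of the old
   setter, the first insn that still uses the set register receives the
   link.  */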
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
	  && !reg_mentioned_p (x, expr))
	return true;
    }
  return false;
}

DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because
   the reg-to-reg copy can usefully combine with later instructions, but we
   do not want to combine the hard reg into later instructions, for that
   restricts register allocation.  */

static void
make_more_copies (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  rtx set = single_set (insn);
	  if (!set)
	    continue;

	  rtx dest = SET_DEST (set);
	  if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
	    continue;

	  rtx src = SET_SRC (set);
	  if (!(REG_P (src) && HARD_REGISTER_P (src)))
	    continue;
	  if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
	    continue;

	  rtx new_reg = gen_reg_rtx (GET_MODE (dest));
	  rtx_insn *new_insn = gen_move_insn (new_reg, src);
	  SET_SRC (set) = new_reg;
	  emit_insn_before (new_insn, insn);
	  df_insn_rescan (insn);
	}
    }
}
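/* For example, an incoming-argument copy "(set (reg 130) (reg:SI 4))"
   becomes "(set (reg 131) (reg:SI 4))" followed by
   "(set (reg 130) (reg 131))", so later combinations work with pseudo 131
   rather than with hard reg 4.  (The register numbers are illustrative.)  */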
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  make_more_copies ();

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  int rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
	free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}