RTL_PASS, // type
"", // name (will be patched)
OPTGROUP_NONE, // optinfo_flags
- TV_DF_SCAN, // tv_id
+ TV_MACH_DEP, // tv_id
0, // properties_required
0, // properties_provided
0, // properties_destroyed
- PLUS insn of that kind.
- Indirect loads and stores.
In almost all cases, combine opportunities arise from the preparation
- done by `avr_split_tiny_move', but in some rare cases combinations are
- found for the ordinary cores, too.
+ done by `avr_split_fake_addressing_move', but in some rare cases combinations
+ are found for the ordinary cores, too.
As we consider at most one Mem insn per try, there may still be missed
optimizations; for example, POST_INC + PLUS + POST_INC might be performed
as PRE_DEC + PRE_DEC for two adjacent locations. */
core's capabilities. This sets the stage for pass .avr-fuse-add. */
bool
-avr_split_tiny_move (rtx_insn * /*insn*/, rtx *xop)
+avr_split_fake_addressing_move (rtx_insn * /*insn*/, rtx *xop)
{
bool store_p = false;
rtx mem, reg_or_0;
/* A post reload optimization pass that fuses PLUS insns with CONST_INT
addend with a load or store insn to get POST_INC or PRE_DEC addressing.
It can also fuse two PLUSes to a single one, which may occur due to
- splits from `avr_split_tiny_move'. We do this in an own pass because
- it can find more cases than peephole2, for example when there are
- unrelated insns between the interesting ones. */
+ splits from `avr_split_fake_addressing_move'. We do this in a pass of
+ its own because it can find more cases than peephole2, for example when
+ there are unrelated insns between the interesting ones. */
INSERT_PASS_BEFORE (pass_peephole2, 1, avr_pass_fuse_add);
tries to fix such situations by operating on the original mode. This
reduces code size and register pressure.
- The assertion is that the code generated by casesi is unaltered and a
+ The assertion is that the code generated by casesi is unaltered and
a sign-extend or zero-extend from QImode or HImode precedes the casesi
- insns withaout any insns in between. */
+ insns without any insns in between. */
INSERT_PASS_AFTER (pass_expand, 1, avr_pass_casesi);
extern rtl_opt_pass *make_avr_pass_ifelse (gcc::context *);
#ifdef RTX_CODE
extern bool avr_casei_sequence_check_operands (rtx *xop);
-extern bool avr_split_tiny_move (rtx_insn *insn, rtx *operands);
+extern bool avr_split_fake_addressing_move (rtx_insn *insn, rtx *operands);
#endif /* RTX_CODE */
/* From avr-log.cc */
(clobber (reg:CC REG_CC))])])
-;; For LPM loads from AS1 we split
-;; R = *Z
-;; to
-;; R = *Z++
-;; Z = Z - sizeof (R)
-;;
-;; so that the second instruction can be optimized out.
-
-(define_split ; "split-lpmx"
- [(set (match_operand:HISI 0 "register_operand" "")
- (match_operand:HISI 1 "memory_operand" ""))]
- "reload_completed
- && AVR_HAVE_LPMX
- && avr_mem_flash_p (operands[1])
- && REG_P (XEXP (operands[1], 0))
- && !reg_overlap_mentioned_p (XEXP (operands[1], 0), operands[0])"
- [(set (match_dup 0)
- (match_dup 2))
- (set (match_dup 3)
- (plus:HI (match_dup 3)
- (match_dup 4)))]
- {
- rtx addr = XEXP (operands[1], 0);
-
- operands[2] = replace_equiv_address (operands[1],
- gen_rtx_POST_INC (Pmode, addr));
- operands[3] = addr;
- operands[4] = gen_int_mode (-<SIZE>, HImode);
- })
-
-
;; Legitimate address and stuff allows way more addressing modes than
;; Reduced Tiny actually supports. Split them now so that we get
;; closer to real instructions which may result in some optimization
-;; opportunities.
+;; opportunities. This applies also to fake X + offset addressing.
(define_split
[(parallel [(set (match_operand:MOVMODE 0 "nonimmediate_operand")
(match_operand:MOVMODE 1 "general_operand"))
&& (MEM_P (operands[0]) || MEM_P (operands[1]))"
[(scratch)]
{
- if (avr_split_tiny_move (curr_insn, operands))
+ if (avr_split_fake_addressing_move (curr_insn, operands))
DONE;
FAIL;
})