/* Subroutines used by or related to instruction recognition.
- Copyright (C) 1987-2016 Free Software Foundation, Inc.
+ Copyright (C) 1987-2019 Free Software Foundation, Inc.
This file is part of GCC.
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
+#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
{
rtx object;
int old_code;
+ bool unshare;
rtx *loc;
rtx old;
- bool unshare;
};
static change_t *changes;
&& REG_P (changes[i].old)
&& asm_noperands (PATTERN (object)) > 0
&& REG_EXPR (changes[i].old) != NULL_TREE
+ && HAS_DECL_ASSEMBLER_NAME_P (REG_EXPR (changes[i].old))
&& DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (changes[i].old))
&& DECL_REGISTER (REG_EXPR (changes[i].old)))
{
rtx x = *loc;
enum rtx_code code = GET_CODE (x);
rtx new_rtx = NULL_RTX;
+ scalar_int_mode is_mode;
if (SWAPPABLE_OPERANDS_P (x)
&& swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
happen, we might just fail in some cases). */
if (MEM_P (XEXP (x, 0))
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &is_mode)
&& CONST_INT_P (XEXP (x, 1))
&& CONST_INT_P (XEXP (x, 2))
&& !mode_dependent_address_p (XEXP (XEXP (x, 0), 0),
MEM_ADDR_SPACE (XEXP (x, 0)))
&& !MEM_VOLATILE_P (XEXP (x, 0)))
{
- machine_mode wanted_mode = VOIDmode;
- machine_mode is_mode = GET_MODE (XEXP (x, 0));
int pos = INTVAL (XEXP (x, 2));
-
+ machine_mode new_mode = is_mode;
if (GET_CODE (x) == ZERO_EXTRACT && targetm.have_extzv ())
- {
- wanted_mode = insn_data[targetm.code_for_extzv].operand[1].mode;
- if (wanted_mode == VOIDmode)
- wanted_mode = word_mode;
- }
+ new_mode = insn_data[targetm.code_for_extzv].operand[1].mode;
else if (GET_CODE (x) == SIGN_EXTRACT && targetm.have_extv ())
- {
- wanted_mode = insn_data[targetm.code_for_extv].operand[1].mode;
- if (wanted_mode == VOIDmode)
- wanted_mode = word_mode;
- }
+ new_mode = insn_data[targetm.code_for_extv].operand[1].mode;
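+ /* A VOIDmode operand in the insn data means the pattern accepts
+ any mode; fall back to word_mode in that case. */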
+ scalar_int_mode wanted_mode = (new_mode == VOIDmode
+ ? word_mode
+ : as_a <scalar_int_mode> (new_mode));
/* If we have a narrower mode, we can do something. */
- if (wanted_mode != VOIDmode
- && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ if (GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
{
int offset = pos / BITS_PER_UNIT;
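+ /* For example, a field starting at bit 8 begins one byte into
+ the MEM, so offset == 1 (any endianness correction is applied
+ separately). */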
rtx newmem;
However, we must allow them after reload so that they can
get cleaned up by cleanup_subreg_operands. */
if (!reload_completed && MEM_P (sub)
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (sub)))
+ && paradoxical_subreg_p (op))
return 0;
#endif
/* Avoid memories with nonzero SUBREG_BYTE, as offsetting the memory
might be called from cleanup_subreg_operands.
??? This is a kludge. */
- if (!reload_completed && SUBREG_BYTE (op) != 0
+ if (!reload_completed
+ && maybe_ne (SUBREG_BYTE (op), 0)
&& MEM_P (sub))
return 0;
-#ifdef CANNOT_CHANGE_MODE_CLASS
if (REG_P (sub)
&& REGNO (sub) < FIRST_PSEUDO_REGISTER
- && REG_CANNOT_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
+ && !REG_CAN_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
&& GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_INT
&& GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_FLOAT
/* LRA can generate some invalid SUBREGS just for matched
valid. */
&& ! LRA_SUBREG_P (op))
return 0;
-#endif
/* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally
create such rtl, and we must reject it. */
size of floating point mode can be less than the integer
mode. */
&& ! lra_in_progress
- && GET_MODE_SIZE (GET_MODE (op)) > GET_MODE_SIZE (GET_MODE (sub)))
+ && paradoxical_subreg_p (op))
return 0;
op = sub;
int
address_operand (rtx op, machine_mode mode)
{
+ /* Wrong mode for an address expr. */
+ if (GET_MODE (op) != VOIDmode
+ && ! SCALAR_INT_MODE_P (GET_MODE (op)))
+ return false;
+
return memory_address_p (mode, op);
}
if (mode != VOIDmode)
{
- int prec = GET_MODE_PRECISION (mode);
- int bitsize = GET_MODE_BITSIZE (mode);
+ scalar_int_mode int_mode = as_a <scalar_int_mode> (mode);
+ int prec = GET_MODE_PRECISION (int_mode);
+ int bitsize = GET_MODE_BITSIZE (int_mode);
if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize)
return 0;
int
push_operand (rtx op, machine_mode mode)
{
- unsigned int rounded_size = GET_MODE_SIZE (mode);
-
-#ifdef PUSH_ROUNDING
- rounded_size = PUSH_ROUNDING (rounded_size);
-#endif
-
if (!MEM_P (op))
return 0;
if (mode != VOIDmode && GET_MODE (op) != mode)
return 0;
+ poly_int64 rounded_size = GET_MODE_SIZE (mode);
+
+#ifdef PUSH_ROUNDING
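+ /* MACRO_INT lowers the poly_int64 size to a plain integer, since
+ PUSH_ROUNDING is a target macro that expects an ordinary integer
+ (assumed rationale). */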
+ rounded_size = PUSH_ROUNDING (MACRO_INT (rounded_size));
+#endif
+
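+ /* Two shapes are accepted (illustrative, for a downward-growing
+ stack): a plain push (mem:M (pre_dec (reg sp))) when the rounded
+ size equals the mode size, or
+ (mem:M (pre_modify (reg sp) (plus (reg sp) (const_int -rounded_size))))
+ when padding changes it. */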
op = XEXP (op, 0);
- if (rounded_size == GET_MODE_SIZE (mode))
+ if (known_eq (rounded_size, GET_MODE_SIZE (mode)))
{
if (GET_CODE (op) != STACK_PUSH_CODE)
return 0;
}
else
{
+ poly_int64 offset;
if (GET_CODE (op) != PRE_MODIFY
|| GET_CODE (XEXP (op, 1)) != PLUS
|| XEXP (XEXP (op, 1), 0) != XEXP (op, 0)
- || !CONST_INT_P (XEXP (XEXP (op, 1), 1))
- || INTVAL (XEXP (XEXP (op, 1), 1))
- != ((STACK_GROWS_DOWNWARD ? -1 : 1) * (int) rounded_size))
+ || !poly_int_rtx_p (XEXP (XEXP (op, 1), 1), &offset)
+ || (STACK_GROWS_DOWNWARD
+ ? maybe_ne (offset, -rounded_size)
+ : maybe_ne (offset, rounded_size)))
return 0;
}
if (! reload_completed
&& GET_CODE (op) == SUBREG && MEM_P (SUBREG_REG (op)))
{
- int offset = SUBREG_BYTE (op);
- rtx inner = SUBREG_REG (op);
-
if (mode != VOIDmode && GET_MODE (op) != mode)
return 0;
address is if OFFSET is zero and the address already is an operand
or if the address is (plus Y (const_int -OFFSET)) and Y is an
operand. */
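+ /* E.g. (subreg:SI (mem:DI (plus Y (const_int -4))) 4) qualifies:
+ the -4 in the address cancels SUBREG_BYTE == 4. */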
-
- return ((offset == 0 && general_operand (XEXP (inner, 0), Pmode))
- || (GET_CODE (XEXP (inner, 0)) == PLUS
- && CONST_INT_P (XEXP (XEXP (inner, 0), 1))
- && INTVAL (XEXP (XEXP (inner, 0), 1)) == -offset
- && general_operand (XEXP (XEXP (inner, 0), 0), Pmode)));
+ poly_int64 offset;
+ rtx addr = strip_offset (XEXP (SUBREG_REG (op), 0), &offset);
+ return (known_eq (offset + SUBREG_BYTE (op), 0)
+ && general_operand (addr, Pmode));
}
return (MEM_P (op)
/* If BODY is an insn body that uses ASM_OPERANDS,
return the number of operands (both input and output) in the insn.
+ If BODY is an insn body that uses ASM_INPUT with CLOBBERS in PARALLEL,
+ return 0.
Otherwise return -1. */
int
asm_noperands (const_rtx body)
{
rtx asm_op = extract_asm_operands (CONST_CAST_RTX (body));
- int n_sets = 0;
+ int i, n_sets = 0;
if (asm_op == NULL)
- return -1;
+ {
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) >= 2
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_INPUT)
+ {
+ /* body is [(asm_input ...) (clobber (reg ...))...]. */
+ for (i = XVECLEN (body, 0) - 1; i > 0; i--)
+ if (GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
+ return -1;
+ return 0;
+ }
+ return -1;
+ }
if (GET_CODE (body) == SET)
n_sets = 1;
else if (GET_CODE (body) == PARALLEL)
{
- int i;
if (GET_CODE (XVECEXP (body, 0, 0)) == SET)
{
/* Multiple output operands, or 1 output plus some clobbers:
the locations of the operands within the insn into the vector OPERAND_LOCS,
and the constraints for the operands into CONSTRAINTS.
Write the modes of the operands into MODES.
+ Write the location info into LOC.
Return the assembler-template.
+ If BODY is an insn body that uses ASM_INPUT with CLOBBERS in PARALLEL,
+ return the string of the basic asm (the ASM_INPUT).
- If MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
+ If LOC, MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
we don't store that info. */
const char *
{
if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
break; /* Past last SET */
+ gcc_assert (GET_CODE (XVECEXP (body, 0, i)) == SET);
if (operands)
operands[i] = SET_DEST (XVECEXP (body, 0, i));
if (operand_locs)
}
nbase = i;
}
+ else if (GET_CODE (asmop) == ASM_INPUT)
+ {
+ if (loc)
+ *loc = ASM_INPUT_SOURCE_LOCATION (asmop);
+ return XSTR (asmop, 0);
+ }
break;
}
Match any memory and hope things are resolved after reload. */
incdec_ok = true;
+ /* FALLTHRU */
default:
cn = lookup_constraint (constraint);
switch (get_constraint_type (cn))
break;
case CT_MEMORY:
+ case CT_SPECIAL_MEMORY:
/* Every memory operand can be reloaded to fit. */
result = result || memory_operand (op, VOIDmode);
break;
len = CONSTRAINT_LEN (c, constraint);
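+ /* Skip this constraint, but stop at ',' so that a
+ multi-character constraint cut short by an alternative
+ separator fails the length check below. */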
do
constraint++;
- while (--len && *constraint);
+ while (--len && *constraint && *constraint != ',');
if (len)
return 0;
}
int (*addressp) (machine_mode, rtx, addr_space_t) =
(strictp ? strict_memory_address_addr_space_p
: memory_address_addr_space_p);
- unsigned int mode_sz = GET_MODE_SIZE (mode);
+ poly_int64 mode_sz = GET_MODE_SIZE (mode);
if (CONSTANT_ADDRESS_P (y))
return 1;
Clearly that depends on the situation in which it's being used.
However, the current situation in which we test 0xffffffff is
less than ideal. Caveat user. */
- if (mode_sz == 0)
+ if (known_eq (mode_sz, 0))
mode_sz = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
/* If the expression contains a constant term,
go inside a LO_SUM here, so we do so as well. */
if (GET_CODE (y) == LO_SUM
&& mode != BLKmode
- && mode_sz <= GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT)
+ && known_le (mode_sz, GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT))
z = gen_rtx_LO_SUM (address_mode, XEXP (y, 0),
plus_constant (address_mode, XEXP (y, 1),
mode_sz - 1));
case ADDR_VEC:
case ADDR_DIFF_VEC:
case VAR_LOCATION:
+ case DEBUG_MARKER:
return;
case SET:
case PARALLEL:
if ((GET_CODE (XVECEXP (body, 0, 0)) == SET
&& GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS)
- || GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ || GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS
+ || GET_CODE (XVECEXP (body, 0, 0)) == ASM_INPUT)
goto asm_insn;
else
goto normal_insn;
which_alternative = -1;
}
-/* Fill in OP_ALT_BASE for an instruction that has N_OPERANDS operands,
- N_ALTERNATIVES alternatives and constraint strings CONSTRAINTS.
- OP_ALT_BASE has N_ALTERNATIVES * N_OPERANDS entries and CONSTRAINTS
- has N_OPERANDS entries. */
+/* Fill in OP_ALT_BASE for an instruction that has N_OPERANDS
+ operands, N_ALTERNATIVES alternatives and constraint strings
+ CONSTRAINTS. OP_ALT_BASE has N_ALTERNATIVES * N_OPERANDS entries
+ and CONSTRAINTS has N_OPERANDS entries. OPLOC should be passed in
+ if the insn is an asm statement and preprocessing should take the
+ asm operands into account, e.g. to determine whether they could be
+ addresses in constraints that require addresses; it should then
+ point to an array of pointers to each operand. */
void
preprocess_constraints (int n_operands, int n_alternatives,
const char **constraints,
- operand_alternative *op_alt_base)
+ operand_alternative *op_alt_base,
+ rtx **oploc)
{
for (int i = 0; i < n_operands; i++)
{
break;
case CT_MEMORY:
+ case CT_SPECIAL_MEMORY:
op_alt[i].memory_ok = 1;
break;
case CT_ADDRESS:
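+ /* For asm statements, only treat the operand as an address
+ if it could actually be one (see OPLOC in the function
+ comment). */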
+ if (oploc && !address_operand (*oploc[i], VOIDmode))
+ break;
+
op_alt[i].is_address = 1;
op_alt[i].cl
= (reg_class_subunion
for (int i = 0; i < n_operands; ++i)
constraints[i] = insn_data[icode].operand[i].constraint;
- preprocess_constraints (n_operands, n_alternatives, constraints, op_alt);
+ preprocess_constraints (n_operands, n_alternatives, constraints, op_alt,
+ NULL);
this_target_recog->x_op_alt[icode] = op_alt;
return op_alt;
int n_entries = n_operands * n_alternatives;
memset (asm_op_alt, 0, n_entries * sizeof (operand_alternative));
preprocess_constraints (n_operands, n_alternatives,
- recog_data.constraints, asm_op_alt);
+ recog_data.constraints, asm_op_alt,
+ NULL);
recog_op_alt = asm_op_alt;
}
}
/* p is used for address_operands. When we are called by
gen_reload, no one will have checked that the address is
strictly valid, i.e., that all pseudos requiring hard regs
- have gotten them. */
- if (strict <= 0
- || (strict_memory_address_p (recog_data.operand_mode[opno],
- op)))
+ have gotten them. We also want to make sure we have a
+ valid mode. */
+ if ((GET_MODE (op) == VOIDmode
+ || SCALAR_INT_MODE_P (GET_MODE (op)))
+ && (strict <= 0
+ || (strict_memory_address_p
+ (recog_data.operand_mode[opno], op))))
win = 1;
break;
void
split_all_insns (void)
{
- sbitmap blocks;
bool changed;
+ bool need_cfg_cleanup = false;
basic_block bb;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap blocks (last_basic_block_for_fn (cfun));
bitmap_clear (blocks);
changed = false;
CODE_LABELS and short-out basic blocks. */
next = NEXT_INSN (insn);
finish = (insn == BB_END (bb));
+
+ /* If INSN has a REG_EH_REGION note and we split INSN, the
+ resulting split insns may not have (or need) REG_EH_REGION
+ notes.
+
+ If that happens and INSN was the last reference to the
+ given EH region, then the EH region will become unreachable.
+ We cannot leave the unreachable blocks in the CFG as that
+ will trigger a checking failure.
+
+ So track if INSN has a REG_EH_REGION note. If so and we
+ split INSN, then trigger a CFG cleanup. */
+ rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
if (INSN_P (insn))
{
rtx set = single_set (insn);
nops then anyways. */
if (reload_completed)
delete_insn_and_edges (insn);
+ if (note)
+ need_cfg_cleanup = true;
}
else
{
{
bitmap_set_bit (blocks, bb->index);
changed = true;
+ if (note)
+ need_cfg_cleanup = true;
}
}
}
default_rtl_profile ();
if (changed)
- find_many_sub_basic_blocks (blocks);
+ {
+ find_many_sub_basic_blocks (blocks);
+
+ /* Splitting could drop a REG_EH_REGION note if the insn
+ potentially trapped in its original form but does not in its
+ split form. Consider, with -fnon-call-exceptions, a
+ FLOAT_TRUNCATE that splits into a memory store/load pair. */
+ if (need_cfg_cleanup)
+ cleanup_cfg (0);
+ }
checking_verify_flow_info ();
-
- sbitmap_free (blocks);
}
/* Same as split_all_insns, but do not expect CFG to be available.
#endif
/* Can it support the mode we need? */
- if (! HARD_REGNO_MODE_OK (regno, mode))
+ if (!targetm.hard_regno_mode_ok (regno, mode))
continue;
success = 1;
- for (j = 0; success && j < hard_regno_nregs[regno][mode]; j++)
+ for (j = 0; success && j < hard_regno_nregs (regno, mode); j++)
{
/* Don't allocate fixed registers. */
if (fixed_regs[regno + j])
case REG_NORETURN:
case REG_SETJMP:
case REG_TM:
+ case REG_CALL_NOCF_CHECK:
add_reg_note (new_insn, REG_NOTE_KIND (note),
XEXP (note, 0));
break;
last = emit_insn_after_setloc (attempt,
peep2_insn_data[i].insn,
INSN_LOCATION (peepinsn));
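+ /* Preserve the hot/cold section-crossing flag when the
+ peephole replaces one jump with another. */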
+ if (JUMP_P (peepinsn) && JUMP_P (last))
+ CROSSING_JUMP_P (last) = CROSSING_JUMP_P (peepinsn);
before_try = PREV_INSN (insn);
delete_insn_chain (insn, peep2_insn_data[i].insn, false);
flags);
nehe->probability = eh_edge->probability;
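+ /* The fall-through edge gets the complement of the new
+ EH edge's probability. */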
- nfte->probability
- = REG_BR_PROB_BASE - nehe->probability;
+ nfte->probability = nehe->probability.invert ();
peep2_do_cleanup_cfg |= purge_dead_edges (nfte->dest);
bb = nfte->src;
/* Re-insert the ARGS_SIZE notes. */
if (as_note)
- fixup_args_size_notes (before_try, last, INTVAL (XEXP (as_note, 0)));
+ fixup_args_size_notes (before_try, last, get_args_size (as_note));
/* If we generated a jump instruction, it won't have
JUMP_LABEL set. Recompute after we're done. */
/* Common predicates for use with define_bypass. */
-/* True if the dependency between OUT_INSN and IN_INSN is on the store
- data not the address operand(s) of the store. IN_INSN and OUT_INSN
- must be either a single_set or a PARALLEL with SETs inside. */
+/* Helper function for store_data_bypass_p, handle just a single SET
+ IN_SET. */
-int
-store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
+static bool
+store_data_bypass_p_1 (rtx_insn *out_insn, rtx in_set)
{
- rtx out_set, in_set;
- rtx out_pat, in_pat;
- rtx out_exp, in_exp;
- int i, j;
+ if (!MEM_P (SET_DEST (in_set)))
+ return false;
- in_set = single_set (in_insn);
- if (in_set)
+ rtx out_set = single_set (out_insn);
+ if (out_set)
+ return !reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set));
+
+ rtx out_pat = PATTERN (out_insn);
+ if (GET_CODE (out_pat) != PARALLEL)
+ return false;
+
+ for (int i = 0; i < XVECLEN (out_pat, 0); i++)
{
- if (!MEM_P (SET_DEST (in_set)))
- return false;
+ rtx out_exp = XVECEXP (out_pat, 0, i);
- out_set = single_set (out_insn);
- if (out_set)
- {
- if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set)))
- return false;
- }
- else
- {
- out_pat = PATTERN (out_insn);
+ if (GET_CODE (out_exp) == CLOBBER || GET_CODE (out_exp) == USE
+ || GET_CODE (out_exp) == CLOBBER_HIGH)
+ continue;
- if (GET_CODE (out_pat) != PARALLEL)
- return false;
+ gcc_assert (GET_CODE (out_exp) == SET);
- for (i = 0; i < XVECLEN (out_pat, 0); i++)
- {
- out_exp = XVECEXP (out_pat, 0, i);
+ if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_set)))
+ return false;
+ }
- if (GET_CODE (out_exp) == CLOBBER)
- continue;
+ return true;
+}
- gcc_assert (GET_CODE (out_exp) == SET);
+/* True if the dependency between OUT_INSN and IN_INSN is on the store
+ data not the address operand(s) of the store. IN_INSN and OUT_INSN
+ must be either a single_set or a PARALLEL with SETs inside. */
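+/* For example, if OUT_INSN sets r1, the bypass applies when IN_INSN
+   is (set (mem:SI (reg r3)) (reg r1)) (dependence on the stored
+   data) but not when it is (set (mem:SI (reg r1)) (reg r2))
+   (dependence on the address); register names are illustrative.  */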
- if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_set)))
- return false;
- }
- }
- }
- else
- {
- in_pat = PATTERN (in_insn);
- gcc_assert (GET_CODE (in_pat) == PARALLEL);
+int
+store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
+{
+ rtx in_set = single_set (in_insn);
+ if (in_set)
+ return store_data_bypass_p_1 (out_insn, in_set);
- for (i = 0; i < XVECLEN (in_pat, 0); i++)
- {
- in_exp = XVECEXP (in_pat, 0, i);
+ rtx in_pat = PATTERN (in_insn);
+ if (GET_CODE (in_pat) != PARALLEL)
+ return false;
- if (GET_CODE (in_exp) == CLOBBER)
- continue;
+ for (int i = 0; i < XVECLEN (in_pat, 0); i++)
+ {
+ rtx in_exp = XVECEXP (in_pat, 0, i);
- gcc_assert (GET_CODE (in_exp) == SET);
+ if (GET_CODE (in_exp) == CLOBBER || GET_CODE (in_exp) == USE
+ || GET_CODE (in_exp) == CLOBBER_HIGH)
+ continue;
- if (!MEM_P (SET_DEST (in_exp)))
- return false;
+ gcc_assert (GET_CODE (in_exp) == SET);
- out_set = single_set (out_insn);
- if (out_set)
- {
- if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_exp)))
- return false;
- }
- else
- {
- out_pat = PATTERN (out_insn);
- gcc_assert (GET_CODE (out_pat) == PARALLEL);
-
- for (j = 0; j < XVECLEN (out_pat, 0); j++)
- {
- out_exp = XVECEXP (out_pat, 0, j);
-
- if (GET_CODE (out_exp) == CLOBBER)
- continue;
-
- gcc_assert (GET_CODE (out_exp) == SET);
-
- if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_exp)))
- return false;
- }
- }
- }
+ if (!store_data_bypass_p_1 (out_insn, in_exp))
+ return false;
}
return true;
{
rtx exp = XVECEXP (out_pat, 0, i);
- if (GET_CODE (exp) == CLOBBER)
+ if (GET_CODE (exp) == CLOBBER || GET_CODE (exp) == CLOBBER_HIGH)
continue;
gcc_assert (GET_CODE (exp) == SET);
OPTGROUP_NONE, /* optinfo_flags */
TV_NONE, /* tv_id */
0, /* properties_required */
- 0, /* properties_provided */
+ PROP_rtl_split_insns, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
return new pass_split_all_insns (ctxt);
}
-static unsigned int
-rest_of_handle_split_after_reload (void)
-{
- /* If optimizing, then go ahead and split insns now. */
-#ifndef STACK_REGS
- if (optimize > 0)
-#endif
- split_all_insns ();
- return 0;
-}
-
namespace {
const pass_data pass_data_split_after_reload =
{}
/* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ /* If optimizing, then go ahead and split insns now. */
+ if (optimize > 0)
+ return true;
+
+#ifdef STACK_REGS
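+    /* On STACK_REGS targets, insns must be split even at -O0 so
+       that reg-stack conversion sees them in split form (assumed
+       rationale for the unconditional split).  */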
+ return true;
+#else
+ return false;
+#endif
+ }
+
virtual unsigned int execute (function *)
{
- return rest_of_handle_split_after_reload ();
+ split_all_insns ();
+ return 0;
}
}; // class pass_split_after_reload