/* Subroutines used by or related to instruction recognition.
- Copyright (C) 1987-2014 Free Software Foundation, Inc.
+ Copyright (C) 1987-2019 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
+#include "rtl.h"
#include "tree.h"
-#include "rtl-error.h"
+#include "cfghooks.h"
+#include "df.h"
+#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
-#include "insn-attr.h"
-#include "hard-reg-set.h"
-#include "recog.h"
#include "regs.h"
+#include "emit-rtl.h"
+#include "recog.h"
+#include "insn-attr.h"
#include "addresses.h"
-#include "expr.h"
-#include "hashtab.h"
-#include "hash-set.h"
-#include "vec.h"
-#include "machmode.h"
-#include "input.h"
-#include "function.h"
-#include "flags.h"
-#include "basic-block.h"
+#include "cfgrtl.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
#include "reload.h"
-#include "target.h"
#include "tree-pass.h"
-#include "df.h"
-#include "insn-codes.h"
-
-#ifndef STACK_PUSH_CODE
-#ifdef STACK_GROWS_DOWNWARD
-#define STACK_PUSH_CODE PRE_DEC
-#else
-#define STACK_PUSH_CODE PRE_INC
-#endif
-#endif
#ifndef STACK_POP_CODE
-#ifdef STACK_GROWS_DOWNWARD
+#if STACK_GROWS_DOWNWARD
#define STACK_POP_CODE POST_INC
#else
#define STACK_POP_CODE POST_DEC
#endif
#endif
-static void validate_replace_rtx_1 (rtx *, rtx, rtx, rtx, bool);
+static void validate_replace_rtx_1 (rtx *, rtx, rtx, rtx_insn *, bool);
static void validate_replace_src_1 (rtx *, void *);
-static rtx split_insn (rtx_insn *);
+static rtx_insn *split_insn (rtx_insn *);
struct target_recog default_target_recog;
#if SWITCHABLE_TARGET
\f
/* Static data for the next two routines. */
-typedef struct change_t
+struct change_t
{
rtx object;
int old_code;
+ bool unshare;
rtx *loc;
rtx old;
- bool unshare;
-} change_t;
+};
static change_t *changes;
static int changes_allocated;
Return true if anything was changed. */
bool
-canonicalize_change_group (rtx insn, rtx x)
+canonicalize_change_group (rtx_insn *insn, rtx x)
{
if (COMMUTATIVE_P (x)
&& swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
&& REG_P (changes[i].old)
&& asm_noperands (PATTERN (object)) > 0
&& REG_EXPR (changes[i].old) != NULL_TREE
+ && HAS_DECL_ASSEMBLER_NAME_P (REG_EXPR (changes[i].old))
&& DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (changes[i].old))
&& DECL_REGISTER (REG_EXPR (changes[i].old)))
{
}
/* Reduce conditional compilation elsewhere. */
-#ifndef HAVE_extv
-#define HAVE_extv 0
-#define CODE_FOR_extv CODE_FOR_nothing
-#endif
-#ifndef HAVE_extzv
-#define HAVE_extzv 0
-#define CODE_FOR_extzv CODE_FOR_nothing
-#endif
-
/* A subroutine of validate_replace_rtx_1 that tries to simplify the resulting
rtx. */
static void
-simplify_while_replacing (rtx *loc, rtx to, rtx object,
- enum machine_mode op0_mode)
+simplify_while_replacing (rtx *loc, rtx to, rtx_insn *object,
+ machine_mode op0_mode)
{
rtx x = *loc;
enum rtx_code code = GET_CODE (x);
rtx new_rtx = NULL_RTX;
+ scalar_int_mode is_mode;
if (SWAPPABLE_OPERANDS_P (x)
&& swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
happen, we might just fail in some cases). */
if (MEM_P (XEXP (x, 0))
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &is_mode)
&& CONST_INT_P (XEXP (x, 1))
&& CONST_INT_P (XEXP (x, 2))
&& !mode_dependent_address_p (XEXP (XEXP (x, 0), 0),
MEM_ADDR_SPACE (XEXP (x, 0)))
&& !MEM_VOLATILE_P (XEXP (x, 0)))
{
- enum machine_mode wanted_mode = VOIDmode;
- enum machine_mode is_mode = GET_MODE (XEXP (x, 0));
int pos = INTVAL (XEXP (x, 2));
-
- if (GET_CODE (x) == ZERO_EXTRACT && HAVE_extzv)
- {
- wanted_mode = insn_data[CODE_FOR_extzv].operand[1].mode;
- if (wanted_mode == VOIDmode)
- wanted_mode = word_mode;
- }
- else if (GET_CODE (x) == SIGN_EXTRACT && HAVE_extv)
- {
- wanted_mode = insn_data[CODE_FOR_extv].operand[1].mode;
- if (wanted_mode == VOIDmode)
- wanted_mode = word_mode;
- }
+ machine_mode new_mode = is_mode;
+ if (GET_CODE (x) == ZERO_EXTRACT && targetm.have_extzv ())
+ new_mode = insn_data[targetm.code_for_extzv].operand[1].mode;
+ else if (GET_CODE (x) == SIGN_EXTRACT && targetm.have_extv ())
+ new_mode = insn_data[targetm.code_for_extv].operand[1].mode;
+ scalar_int_mode wanted_mode = (new_mode == VOIDmode
+ ? word_mode
+ : as_a <scalar_int_mode> (new_mode));
/* If we have a narrower mode, we can do something. */
- if (wanted_mode != VOIDmode
- && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ if (GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
{
int offset = pos / BITS_PER_UNIT;
rtx newmem;
validate_change passing OBJECT. */
static void
-validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object,
+validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx_insn *object,
bool simplify)
{
int i, j;
const char *fmt;
rtx x = *loc;
enum rtx_code code;
- enum machine_mode op0_mode = VOIDmode;
+ machine_mode op0_mode = VOIDmode;
int prev_changes = num_changes;
if (!x)
if INSN is still valid. */
int
-validate_replace_rtx_subexp (rtx from, rtx to, rtx insn, rtx *loc)
+validate_replace_rtx_subexp (rtx from, rtx to, rtx_insn *insn, rtx *loc)
{
validate_replace_rtx_1 (loc, from, to, insn, true);
return apply_change_group ();
changes have been made, validate by seeing if INSN is still valid. */
int
-validate_replace_rtx (rtx from, rtx to, rtx insn)
+validate_replace_rtx (rtx from, rtx to, rtx_insn *insn)
{
validate_replace_rtx_1 (&PATTERN (insn), from, to, insn, true);
return apply_change_group ();
validate_replace_rtx_part (from, to, &PATTERN (insn), insn). */
int
-validate_replace_rtx_part (rtx from, rtx to, rtx *where, rtx insn)
+validate_replace_rtx_part (rtx from, rtx to, rtx *where, rtx_insn *insn)
{
validate_replace_rtx_1 (where, from, to, insn, true);
return apply_change_group ();
/* Same as above, but do not simplify rtx afterwards. */
int
validate_replace_rtx_part_nosimplify (rtx from, rtx to, rtx *where,
- rtx insn)
+ rtx_insn *insn)
{
validate_replace_rtx_1 (where, from, to, insn, false);
return apply_change_group ();
will replace in REG_EQUAL and REG_EQUIV notes. */
void
-validate_replace_rtx_group (rtx from, rtx to, rtx insn)
+validate_replace_rtx_group (rtx from, rtx to, rtx_insn *insn)
{
rtx note;
validate_replace_rtx_1 (&PATTERN (insn), from, to, insn, true);
{
rtx from; /* Old RTX */
rtx to; /* New RTX */
- rtx insn; /* Insn in which substitution is occurring. */
+ rtx_insn *insn; /* Insn in which substitution is occurring. */
};
static void
SET_DESTs. */
void
-validate_replace_src_group (rtx from, rtx to, rtx insn)
+validate_replace_src_group (rtx from, rtx to, rtx_insn *insn)
{
struct validate_replace_src_data d;
pattern and return true if something was simplified. */
bool
-validate_simplify_insn (rtx insn)
+validate_simplify_insn (rtx_insn *insn)
{
int i;
rtx pat = NULL;
return ((num_changes_pending () > 0) && (apply_change_group () > 0));
}
\f
-#ifdef HAVE_cc0
/* Return 1 if the insn using CC0 set by INSN does not contain
any ordered tests applied to the condition codes.
EQ and NE tests do not count. */
int
-next_insn_tests_no_inequality (rtx insn)
+next_insn_tests_no_inequality (rtx_insn *insn)
{
- rtx next = next_cc0_user (insn);
+ rtx_insn *next = next_cc0_user (insn);
/* If there is no next insn, we have to take the conservative choice. */
if (next == 0)
return (INSN_P (next)
&& ! inequality_comparisons_p (PATTERN (next)));
}
-#endif
\f
/* Return 1 if OP is a valid general operand for machine mode MODE.
This is either a register reference, a memory reference,
expressions in the machine description. */
int
-general_operand (rtx op, enum machine_mode mode)
+general_operand (rtx op, machine_mode mode)
{
enum rtx_code code = GET_CODE (op);
However, we must allow them after reload so that they can
get cleaned up by cleanup_subreg_operands. */
if (!reload_completed && MEM_P (sub)
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (sub)))
+ && paradoxical_subreg_p (op))
return 0;
#endif
/* Avoid memories with nonzero SUBREG_BYTE, as offsetting the memory
might be called from cleanup_subreg_operands.
??? This is a kludge. */
- if (!reload_completed && SUBREG_BYTE (op) != 0
+ if (!reload_completed
+ && maybe_ne (SUBREG_BYTE (op), 0)
&& MEM_P (sub))
return 0;
-#ifdef CANNOT_CHANGE_MODE_CLASS
if (REG_P (sub)
&& REGNO (sub) < FIRST_PSEUDO_REGISTER
- && REG_CANNOT_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
+ && !REG_CAN_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
&& GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_INT
&& GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_FLOAT
/* LRA can generate some invalid SUBREGS just for matched
valid. */
&& ! LRA_SUBREG_P (op))
return 0;
-#endif
/* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally
create such rtl, and we must reject it. */
size of floating point mode can be less than the integer
mode. */
&& ! lra_in_progress
- && GET_MODE_SIZE (GET_MODE (op)) > GET_MODE_SIZE (GET_MODE (sub)))
+ && paradoxical_subreg_p (op))
return 0;
op = sub;
expressions in the machine description. */
int
-address_operand (rtx op, enum machine_mode mode)
+address_operand (rtx op, machine_mode mode)
{
+ /* Wrong mode for an address expr. */
+ if (GET_MODE (op) != VOIDmode
+ && ! SCALAR_INT_MODE_P (GET_MODE (op)))
+ return false;
+
return memory_address_p (mode, op);
}
expressions in the machine description. */
int
-register_operand (rtx op, enum machine_mode mode)
+register_operand (rtx op, machine_mode mode)
{
if (GET_CODE (op) == SUBREG)
{
/* Return 1 for a register in Pmode; ignore the tested mode. */
int
-pmode_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+pmode_register_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
return register_operand (op, Pmode);
}
or a hard register. */
int
-scratch_operand (rtx op, enum machine_mode mode)
+scratch_operand (rtx op, machine_mode mode)
{
if (GET_MODE (op) != mode && mode != VOIDmode)
return 0;
expressions in the machine description. */
int
-immediate_operand (rtx op, enum machine_mode mode)
+immediate_operand (rtx op, machine_mode mode)
{
/* Don't accept CONST_INT or anything similar
if the caller wants something floating. */
/* Returns 1 if OP is an operand that is a CONST_INT of mode MODE. */
int
-const_int_operand (rtx op, enum machine_mode mode)
+const_int_operand (rtx op, machine_mode mode)
{
if (!CONST_INT_P (op))
return 0;
/* Returns 1 if OP is an operand that is a CONST_INT or CONST_WIDE_INT
of mode MODE. */
int
-const_scalar_int_operand (rtx op, enum machine_mode mode)
+const_scalar_int_operand (rtx op, machine_mode mode)
{
if (!CONST_SCALAR_INT_P (op))
return 0;
if (mode != VOIDmode)
{
- int prec = GET_MODE_PRECISION (mode);
- int bitsize = GET_MODE_BITSIZE (mode);
+ scalar_int_mode int_mode = as_a <scalar_int_mode> (mode);
+ int prec = GET_MODE_PRECISION (int_mode);
+ int bitsize = GET_MODE_BITSIZE (int_mode);
if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize)
return 0;
floating-point number of MODE. */
int
-const_double_operand (rtx op, enum machine_mode mode)
+const_double_operand (rtx op, machine_mode mode)
{
return (GET_CODE (op) == CONST_DOUBLE)
&& (GET_MODE (op) == mode || mode == VOIDmode);
floating-point number of MODE. */
int
-const_double_operand (rtx op, enum machine_mode mode)
+const_double_operand (rtx op, machine_mode mode)
{
/* Don't accept CONST_INT or anything similar
if the caller wants something floating. */
operand of mode MODE. */
int
-nonimmediate_operand (rtx op, enum machine_mode mode)
+nonimmediate_operand (rtx op, machine_mode mode)
{
return (general_operand (op, mode) && ! CONSTANT_P (op));
}
/* Return 1 if OP is a register reference or immediate value of mode MODE. */
int
-nonmemory_operand (rtx op, enum machine_mode mode)
+nonmemory_operand (rtx op, machine_mode mode)
{
if (CONSTANT_P (op))
return immediate_operand (op, mode);
expressions in the machine description. */
int
-push_operand (rtx op, enum machine_mode mode)
+push_operand (rtx op, machine_mode mode)
{
- unsigned int rounded_size = GET_MODE_SIZE (mode);
-
-#ifdef PUSH_ROUNDING
- rounded_size = PUSH_ROUNDING (rounded_size);
-#endif
-
if (!MEM_P (op))
return 0;
if (mode != VOIDmode && GET_MODE (op) != mode)
return 0;
+ poly_int64 rounded_size = GET_MODE_SIZE (mode);
+
+#ifdef PUSH_ROUNDING
+ rounded_size = PUSH_ROUNDING (MACRO_INT (rounded_size));
+#endif
+
op = XEXP (op, 0);
- if (rounded_size == GET_MODE_SIZE (mode))
+ if (known_eq (rounded_size, GET_MODE_SIZE (mode)))
{
if (GET_CODE (op) != STACK_PUSH_CODE)
return 0;
}
else
{
+ poly_int64 offset;
if (GET_CODE (op) != PRE_MODIFY
|| GET_CODE (XEXP (op, 1)) != PLUS
|| XEXP (XEXP (op, 1), 0) != XEXP (op, 0)
- || !CONST_INT_P (XEXP (XEXP (op, 1), 1))
-#ifdef STACK_GROWS_DOWNWARD
- || INTVAL (XEXP (XEXP (op, 1), 1)) != - (int) rounded_size
-#else
- || INTVAL (XEXP (XEXP (op, 1), 1)) != (int) rounded_size
-#endif
- )
+ || !poly_int_rtx_p (XEXP (XEXP (op, 1), 1), &offset)
+ || (STACK_GROWS_DOWNWARD
+ ? maybe_ne (offset, -rounded_size)
+ : maybe_ne (offset, rounded_size)))
return 0;
}
expressions in the machine description. */
int
-pop_operand (rtx op, enum machine_mode mode)
+pop_operand (rtx op, machine_mode mode)
{
if (!MEM_P (op))
return 0;
for mode MODE in address space AS. */
int
-memory_address_addr_space_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+memory_address_addr_space_p (machine_mode mode ATTRIBUTE_UNUSED,
rtx addr, addr_space_t as)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
expressions in the machine description. */
int
-memory_operand (rtx op, enum machine_mode mode)
+memory_operand (rtx op, machine_mode mode)
{
rtx inner;
that is, a memory reference whose address is a general_operand. */
int
-indirect_operand (rtx op, enum machine_mode mode)
+indirect_operand (rtx op, machine_mode mode)
{
/* Before reload, a SUBREG isn't in memory (see memory_operand, above). */
if (! reload_completed
&& GET_CODE (op) == SUBREG && MEM_P (SUBREG_REG (op)))
{
- int offset = SUBREG_BYTE (op);
- rtx inner = SUBREG_REG (op);
-
if (mode != VOIDmode && GET_MODE (op) != mode)
return 0;
address is if OFFSET is zero and the address already is an operand
or if the address is (plus Y (const_int -OFFSET)) and Y is an
operand. */
-
- return ((offset == 0 && general_operand (XEXP (inner, 0), Pmode))
- || (GET_CODE (XEXP (inner, 0)) == PLUS
- && CONST_INT_P (XEXP (XEXP (inner, 0), 1))
- && INTVAL (XEXP (XEXP (inner, 0), 1)) == -offset
- && general_operand (XEXP (XEXP (inner, 0), 0), Pmode)));
+ poly_int64 offset;
+ rtx addr = strip_offset (XEXP (SUBREG_REG (op), 0), &offset);
+ return (known_eq (offset + SUBREG_BYTE (op), 0)
+ && general_operand (addr, Pmode));
}
return (MEM_P (op)
ORDERED and UNORDERED). */
int
-ordered_comparison_operator (rtx op, enum machine_mode mode)
+ordered_comparison_operator (rtx op, machine_mode mode)
{
if (mode != VOIDmode && GET_MODE (op) != mode)
return false;
MATCH_OPERATOR to recognize all the branch insns. */
int
-comparison_operator (rtx op, enum machine_mode mode)
+comparison_operator (rtx op, machine_mode mode)
{
return ((mode == VOIDmode || GET_MODE (op) == mode)
&& COMPARISON_P (op));
/* If BODY is an insn body that uses ASM_OPERANDS,
return the number of operands (both input and output) in the insn.
+ If BODY is an insn body that uses ASM_INPUT with CLOBBERS in PARALLEL,
+ return 0.
Otherwise return -1. */
int
asm_noperands (const_rtx body)
{
rtx asm_op = extract_asm_operands (CONST_CAST_RTX (body));
- int n_sets = 0;
+ int i, n_sets = 0;
if (asm_op == NULL)
- return -1;
+ {
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) >= 2
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_INPUT)
+ {
+ /* body is [(asm_input ...) (clobber (reg ...))...]. */
+ for (i = XVECLEN (body, 0) - 1; i > 0; i--)
+ if (GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
+ return -1;
+ return 0;
+ }
+ return -1;
+ }
if (GET_CODE (body) == SET)
n_sets = 1;
else if (GET_CODE (body) == PARALLEL)
{
- int i;
if (GET_CODE (XVECEXP (body, 0, 0)) == SET)
{
/* Multiple output operands, or 1 output plus some clobbers:
the locations of the operands within the insn into the vector OPERAND_LOCS,
and the constraints for the operands into CONSTRAINTS.
Write the modes of the operands into MODES.
+ Write the location info into LOC.
Return the assembler-template.
+ If BODY is an insn body that uses ASM_INPUT with CLOBBERS in PARALLEL,
+ return the basic assembly string.
- If MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
+ If LOC, MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
we don't store that info. */
const char *
decode_asm_operands (rtx body, rtx *operands, rtx **operand_locs,
- const char **constraints, enum machine_mode *modes,
+ const char **constraints, machine_mode *modes,
location_t *loc)
{
int nbase = 0, n, i;
{
if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
break; /* Past last SET */
+ gcc_assert (GET_CODE (XVECEXP (body, 0, i)) == SET);
if (operands)
operands[i] = SET_DEST (XVECEXP (body, 0, i));
if (operand_locs)
}
nbase = i;
}
+ else if (GET_CODE (asmop) == ASM_INPUT)
+ {
+ if (loc)
+ *loc = ASM_INPUT_SOURCE_LOCATION (asmop);
+ return XSTR (asmop, 0);
+ }
break;
}
asm_operand_ok (rtx op, const char *constraint, const char **constraints)
{
int result = 0;
-#ifdef AUTO_INC_DEC
bool incdec_ok = false;
-#endif
/* Use constrain_operands after reload. */
gcc_assert (!reload_completed);
result = 1;
break;
-#ifdef AUTO_INC_DEC
case '<':
case '>':
/* ??? Before auto-inc-dec, auto inc/dec insns are not supposed
Match any memory and hope things are resolved after reload. */
incdec_ok = true;
-#endif
+ /* FALLTHRU */
default:
cn = lookup_constraint (constraint);
switch (get_constraint_type (cn))
break;
case CT_MEMORY:
+ case CT_SPECIAL_MEMORY:
/* Every memory operand can be reloaded to fit. */
result = result || memory_operand (op, VOIDmode);
break;
len = CONSTRAINT_LEN (c, constraint);
do
constraint++;
- while (--len && *constraint);
+ while (--len && *constraint && *constraint != ',');
if (len)
return 0;
}
-#ifdef AUTO_INC_DEC
/* For operands without < or > constraints reject side-effects. */
- if (!incdec_ok && result && MEM_P (op))
+ if (AUTO_INC_DEC && !incdec_ok && result && MEM_P (op))
switch (GET_CODE (XEXP (op, 0)))
{
case PRE_INC:
default:
break;
}
-#endif
return result;
}
for the sake of use in reload.c. */
int
-offsettable_address_addr_space_p (int strictp, enum machine_mode mode, rtx y,
+offsettable_address_addr_space_p (int strictp, machine_mode mode, rtx y,
addr_space_t as)
{
enum rtx_code ycode = GET_CODE (y);
rtx z;
rtx y1 = y;
rtx *y2;
- int (*addressp) (enum machine_mode, rtx, addr_space_t) =
+ int (*addressp) (machine_mode, rtx, addr_space_t) =
(strictp ? strict_memory_address_addr_space_p
: memory_address_addr_space_p);
- unsigned int mode_sz = GET_MODE_SIZE (mode);
+ poly_int64 mode_sz = GET_MODE_SIZE (mode);
if (CONSTANT_ADDRESS_P (y))
return 1;
if (mode_dependent_address_p (y, as))
return 0;
- enum machine_mode address_mode = GET_MODE (y);
+ machine_mode address_mode = GET_MODE (y);
if (address_mode == VOIDmode)
address_mode = targetm.addr_space.address_mode (as);
#ifdef POINTERS_EXTEND_UNSIGNED
- enum machine_mode pointer_mode = targetm.addr_space.pointer_mode (as);
+ machine_mode pointer_mode = targetm.addr_space.pointer_mode (as);
#endif
/* ??? How much offset does an offsettable BLKmode reference need?
Clearly that depends on the situation in which it's being used.
However, the current situation in which we test 0xffffffff is
less than ideal. Caveat user. */
- if (mode_sz == 0)
+ if (known_eq (mode_sz, 0))
mode_sz = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
/* If the expression contains a constant term,
go inside a LO_SUM here, so we do so as well. */
if (GET_CODE (y) == LO_SUM
&& mode != BLKmode
- && mode_sz <= GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT)
+ && known_le (mode_sz, GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT))
z = gen_rtx_LO_SUM (address_mode, XEXP (y, 0),
plus_constant (address_mode, XEXP (y, 1),
mode_sz - 1));
that the insn operands are already cached. As above, the attribute
mustn't depend on the values of operands, so we don't provide their
real values here. */
- rtx old_insn = recog_data.insn;
+ rtx_insn *old_insn = recog_data.insn;
int old_alternative = which_alternative;
recog_data.insn = insn;
case ADDR_VEC:
case ADDR_DIFF_VEC:
case VAR_LOCATION:
+ case DEBUG_MARKER:
return;
case SET:
case PARALLEL:
if ((GET_CODE (XVECEXP (body, 0, 0)) == SET
&& GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS)
- || GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ || GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS
+ || GET_CODE (XVECEXP (body, 0, 0)) == ASM_INPUT)
goto asm_insn;
else
goto normal_insn;
gcc_assert (recog_data.n_alternatives <= MAX_RECOG_ALTERNATIVES);
- recog_data.enabled_alternatives = get_enabled_alternatives (insn);
-
recog_data.insn = NULL;
which_alternative = -1;
}
-/* Fill in OP_ALT_BASE for an instruction that has N_OPERANDS operands,
- N_ALTERNATIVES alternatives and constraint strings CONSTRAINTS.
- OP_ALT_BASE has N_ALTERNATIVES * N_OPERANDS entries and CONSTRAINTS
- has N_OPERANDS entries. */
+/* Fill in OP_ALT_BASE for an instruction that has N_OPERANDS
+ operands, N_ALTERNATIVES alternatives and constraint strings
+ CONSTRAINTS. OP_ALT_BASE has N_ALTERNATIVES * N_OPERANDS entries
+ and CONSTRAINTS has N_OPERANDS entries. OPLOC should be passed in
+ if the insn is an asm statement and preprocessing should take the
+ asm operands into account, e.g. to determine whether they could be
+ addresses in constraints that require addresses; it should then
+ point to an array of pointers to each operand. */
void
preprocess_constraints (int n_operands, int n_alternatives,
const char **constraints,
- operand_alternative *op_alt_base)
+ operand_alternative *op_alt_base,
+ rtx **oploc)
{
for (int i = 0; i < n_operands; i++)
{
break;
case CT_MEMORY:
+ case CT_SPECIAL_MEMORY:
op_alt[i].memory_ok = 1;
break;
case CT_ADDRESS:
+ if (oploc && !address_operand (*oploc[i], VOIDmode))
+ break;
+
op_alt[i].is_address = 1;
op_alt[i].cl
= (reg_class_subunion
instruction ICODE. */
const operand_alternative *
-preprocess_insn_constraints (int icode)
+preprocess_insn_constraints (unsigned int icode)
{
- gcc_checking_assert (IN_RANGE (icode, 0, LAST_INSN_CODE));
+ gcc_checking_assert (IN_RANGE (icode, 0, NUM_INSN_CODES - 1));
if (this_target_recog->x_op_alt[icode])
return this_target_recog->x_op_alt[icode];
for (int i = 0; i < n_operands; ++i)
constraints[i] = insn_data[icode].operand[i].constraint;
- preprocess_constraints (n_operands, n_alternatives, constraints, op_alt);
+ preprocess_constraints (n_operands, n_alternatives, constraints, op_alt,
+ NULL);
this_target_recog->x_op_alt[icode] = op_alt;
return op_alt;
The collected data is stored in recog_op_alt. */
void
-preprocess_constraints (rtx insn)
+preprocess_constraints (rtx_insn *insn)
{
int icode = INSN_CODE (insn);
if (icode >= 0)
int n_entries = n_operands * n_alternatives;
memset (asm_op_alt, 0, n_entries * sizeof (operand_alternative));
preprocess_constraints (n_operands, n_alternatives,
- recog_data.constraints, asm_op_alt);
+ recog_data.constraints, asm_op_alt,
+ NULL);
recog_op_alt = asm_op_alt;
}
}
for (opno = 0; opno < recog_data.n_operands; opno++)
{
rtx op = recog_data.operand[opno];
- enum machine_mode mode = GET_MODE (op);
+ machine_mode mode = GET_MODE (op);
const char *p = constraints[opno];
int offset = 0;
int win = 0;
/* p is used for address_operands. When we are called by
gen_reload, no one will have checked that the address is
strictly valid, i.e., that all pseudos requiring hard regs
- have gotten them. */
- if (strict <= 0
- || (strict_memory_address_p (recog_data.operand_mode[opno],
- op)))
+ have gotten them. We also want to make sure we have a
+ valid mode. */
+ if ((GET_MODE (op) == VOIDmode
+ || SCALAR_INT_MODE_P (GET_MODE (op)))
+ && (strict <= 0
+ || (strict_memory_address_p
+ (recog_data.operand_mode[opno], op))))
win = 1;
break;
/* Every memory operand can be reloaded to fit. */
&& ((strict < 0 && MEM_P (op))
/* Before reload, accept what reload can turn
- into mem. */
+ into a mem. */
|| (strict < 0 && CONSTANT_P (op))
+ /* Before reload, accept a pseudo,
+ since LRA can turn it into a mem. */
+ || (strict < 0 && targetm.lra_p () && REG_P (op)
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
/* During reload, accept a pseudo */
|| (reload_in_progress && REG_P (op)
&& REGNO (op) >= FIRST_PSEUDO_REGISTER)))
= recog_data.operand[funny_match[funny_match_index].this_op];
}
-#ifdef AUTO_INC_DEC
/* For operands without < or > constraints reject side-effects. */
- if (recog_data.is_asm)
+ if (AUTO_INC_DEC && recog_data.is_asm)
{
for (opno = 0; opno < recog_data.n_operands; opno++)
if (MEM_P (recog_data.operand[opno]))
break;
}
}
-#endif
+
return 1;
}
}
bool
reg_fits_class_p (const_rtx operand, reg_class_t cl, int offset,
- enum machine_mode mode)
+ machine_mode mode)
{
unsigned int regno = REGNO (operand);
split_all_insns_noflow. Return last insn in the sequence if successful,
or NULL if unsuccessful. */
-static rtx
+static rtx_insn *
split_insn (rtx_insn *insn)
{
/* Split insns here to get max fine-grain parallelism. */
rtx insn_set, last_set, note;
if (last == insn)
- return NULL_RTX;
+ return NULL;
/* If the original instruction was a single set that was known to be
equivalent to a constant, see if we can say the same about the last
void
split_all_insns (void)
{
- sbitmap blocks;
bool changed;
+ bool need_cfg_cleanup = false;
basic_block bb;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap blocks (last_basic_block_for_fn (cfun));
bitmap_clear (blocks);
changed = false;
CODE_LABELS and short-out basic blocks. */
next = NEXT_INSN (insn);
finish = (insn == BB_END (bb));
+
+ /* If INSN has a REG_EH_REGION note and we split INSN, the
+ resulting split may not have/need REG_EH_REGION notes.
+
+ If that happens and INSN was the last reference to the
+ given EH region, then the EH region will become unreachable.
+ We cannot leave the unreachable blocks in the CFG as that
+ will trigger a checking failure.
+
+ So track if INSN has a REG_EH_REGION note. If so and we
+ split INSN, then trigger a CFG cleanup. */
+ rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
if (INSN_P (insn))
{
rtx set = single_set (insn);
nops then anyways. */
if (reload_completed)
delete_insn_and_edges (insn);
+ if (note)
+ need_cfg_cleanup = true;
}
else
{
{
bitmap_set_bit (blocks, bb->index);
changed = true;
+ if (note)
+ need_cfg_cleanup = true;
}
}
}
default_rtl_profile ();
if (changed)
- find_many_sub_basic_blocks (blocks);
-
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ {
+ find_many_sub_basic_blocks (blocks);
+
+      /* Splitting could drop a REG_EH_REGION if it potentially
+ trapped in its original form, but does not in its split
+ form. Consider a FLOAT_TRUNCATE which splits into a memory
+ store/load pair and -fnon-call-exceptions. */
+ if (need_cfg_cleanup)
+ cleanup_cfg (0);
+ }
- sbitmap_free (blocks);
+ checking_verify_flow_info ();
}
/* Same as split_all_insns, but do not expect CFG to be available.
return 0;
}
\f
-#ifdef HAVE_peephole2
struct peep2_insn_data
{
- rtx insn;
+ rtx_insn *insn;
regset live_before;
};
/* The number of instructions available to match a peep2. */
int peep2_current_count;
-/* A non-insn marker indicating the last insn of the block.
- The live_before regset for this element is correct, indicating
- DF_LIVE_OUT for the block. */
-#define PEEP2_EOB pc_rtx
+/* A marker indicating the last insn of the block. The live_before regset
+ for this element is correct, indicating DF_LIVE_OUT for the block. */
+#define PEEP2_EOB invalid_insn_rtx
/* Wrap N to fit into the peep2_insn_data buffer. */
does not exist. Used by the recognizer to find the next insn to match
in a multi-insn pattern. */
-rtx
+rtx_insn *
peep2_next_insn (int n)
{
gcc_assert (n <= peep2_current_count);
int
peep2_reg_dead_p (int ofs, rtx reg)
{
- int regno, n;
-
gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1);
ofs = peep2_buf_position (peep2_current + ofs);
gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX);
- regno = REGNO (reg);
- n = hard_regno_nregs[regno][GET_MODE (reg)];
- while (--n >= 0)
- if (REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno + n))
+ unsigned int end_regno = END_REGNO (reg);
+ for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
+ if (REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno))
return 0;
return 1;
}
rtx
peep2_find_free_register (int from, int to, const char *class_str,
- enum machine_mode mode, HARD_REG_SET *reg_set)
+ machine_mode mode, HARD_REG_SET *reg_set)
{
enum reg_class cl;
HARD_REG_SET live;
#endif
/* Can it support the mode we need? */
- if (! HARD_REGNO_MODE_OK (regno, mode))
+ if (!targetm.hard_regno_mode_ok (regno, mode))
continue;
success = 1;
- for (j = 0; success && j < hard_regno_nregs[regno][mode]; j++)
+ for (j = 0; success && j < hard_regno_nregs (regno, mode); j++)
{
/* Don't allocate fixed registers. */
if (fixed_regs[regno + j])
/* Indicate that all slots except the last holds invalid data. */
for (i = 0; i < MAX_INSNS_PER_PEEP2; ++i)
- peep2_insn_data[i].insn = NULL_RTX;
+ peep2_insn_data[i].insn = NULL;
peep2_current_count = 0;
/* Indicate that the last slot contains live_after data. */
if the replacement is rejected. */
static rtx_insn *
-peep2_attempt (basic_block bb, rtx uncast_insn, int match_len, rtx_insn *attempt)
+peep2_attempt (basic_block bb, rtx_insn *insn, int match_len, rtx_insn *attempt)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
int i;
rtx_insn *last, *before_try, *x;
rtx eh_note, as_note;
/* If we are splitting an RTX_FRAME_RELATED_P insn, do not allow it to
match more than one insn, or to be split into more than one insn. */
- old_insn = as_a <rtx_insn *> (peep2_insn_data[peep2_current].insn);
+ old_insn = peep2_insn_data[peep2_current].insn;
if (RTX_FRAME_RELATED_P (old_insn))
{
bool any_note = false;
rtx note;
j = peep2_buf_position (peep2_current + i);
- old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
+ old_insn = peep2_insn_data[j].insn;
if (!CALL_P (old_insn))
continue;
was_call = true;
case REG_NORETURN:
case REG_SETJMP:
case REG_TM:
+ case REG_CALL_NOCF_CHECK:
add_reg_note (new_insn, REG_NOTE_KIND (note),
XEXP (note, 0));
break;
while (++i <= match_len)
{
j = peep2_buf_position (peep2_current + i);
- old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
+ old_insn = peep2_insn_data[j].insn;
gcc_assert (!CALL_P (old_insn));
}
break;
for (i = match_len; i >= 0; --i)
{
int j = peep2_buf_position (peep2_current + i);
- old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
+ old_insn = peep2_insn_data[j].insn;
as_note = find_reg_note (old_insn, REG_ARGS_SIZE, NULL);
if (as_note)
eh_note = find_reg_note (peep2_insn_data[i].insn, REG_EH_REGION, NULL_RTX);
/* Replace the old sequence with the new. */
- rtx_insn *peepinsn = as_a <rtx_insn *> (peep2_insn_data[i].insn);
+ rtx_insn *peepinsn = peep2_insn_data[i].insn;
last = emit_insn_after_setloc (attempt,
peep2_insn_data[i].insn,
INSN_LOCATION (peepinsn));
+ if (JUMP_P (peepinsn) && JUMP_P (last))
+ CROSSING_JUMP_P (last) = CROSSING_JUMP_P (peepinsn);
before_try = PREV_INSN (insn);
delete_insn_chain (insn, peep2_insn_data[i].insn, false);
flags);
nehe->probability = eh_edge->probability;
- nfte->probability
- = REG_BR_PROB_BASE - nehe->probability;
+ nfte->probability = nehe->probability.invert ();
peep2_do_cleanup_cfg |= purge_dead_edges (nfte->dest);
bb = nfte->src;
/* Re-insert the ARGS_SIZE notes. */
if (as_note)
- fixup_args_size_notes (before_try, last, INTVAL (XEXP (as_note, 0)));
+ fixup_args_size_notes (before_try, last, get_args_size (as_note));
/* If we generated a jump instruction, it won't have
JUMP_LABEL set. Recompute after we're done. */
add more instructions to the buffer. */
static bool
-peep2_fill_buffer (basic_block bb, rtx insn, regset live)
+peep2_fill_buffer (basic_block bb, rtx_insn *insn, regset live)
{
int pos;
COPY_REG_SET (peep2_insn_data[pos].live_before, live);
peep2_current_count++;
- df_simulate_one_insn_forwards (bb, as_a <rtx_insn *> (insn), live);
+ df_simulate_one_insn_forwards (bb, insn, live);
return true;
}
insn = BB_HEAD (bb);
for (;;)
{
- rtx_insn *attempt;
- rtx head;
+ rtx_insn *attempt, *head;
int match_len;
if (!past_end && !NONDEBUG_INSN_P (insn))
/* Match the peephole. */
head = peep2_insn_data[peep2_current].insn;
- attempt = safe_as_a <rtx_insn *> (
- peephole2_insns (PATTERN (head), head, &match_len));
+ attempt = peephole2_insns (PATTERN (head), head, &match_len);
if (attempt != NULL)
{
rtx_insn *last = peep2_attempt (bb, head, match_len, attempt);
if (peep2_do_cleanup_cfg)
cleanup_cfg (CLEANUP_CFG_CHANGED);
}
-#endif /* HAVE_peephole2 */
/* Common predicates for use with define_bypass. */
-/* True if the dependency between OUT_INSN and IN_INSN is on the store
- data not the address operand(s) of the store. IN_INSN and OUT_INSN
- must be either a single_set or a PARALLEL with SETs inside. */
+/* Helper function for store_data_bypass_p, handle just a single SET
+ IN_SET. */
-int
-store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
+static bool
+store_data_bypass_p_1 (rtx_insn *out_insn, rtx in_set)
{
- rtx out_set, in_set;
- rtx out_pat, in_pat;
- rtx out_exp, in_exp;
- int i, j;
+ if (!MEM_P (SET_DEST (in_set)))
+ return false;
- in_set = single_set (in_insn);
- if (in_set)
+ rtx out_set = single_set (out_insn);
+ if (out_set)
+ return !reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set));
+
+ rtx out_pat = PATTERN (out_insn);
+ if (GET_CODE (out_pat) != PARALLEL)
+ return false;
+
+ for (int i = 0; i < XVECLEN (out_pat, 0); i++)
{
- if (!MEM_P (SET_DEST (in_set)))
- return false;
+ rtx out_exp = XVECEXP (out_pat, 0, i);
- out_set = single_set (out_insn);
- if (out_set)
- {
- if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set)))
- return false;
- }
- else
- {
- out_pat = PATTERN (out_insn);
+ if (GET_CODE (out_exp) == CLOBBER || GET_CODE (out_exp) == USE
+ || GET_CODE (out_exp) == CLOBBER_HIGH)
+ continue;
- if (GET_CODE (out_pat) != PARALLEL)
- return false;
+ gcc_assert (GET_CODE (out_exp) == SET);
- for (i = 0; i < XVECLEN (out_pat, 0); i++)
- {
- out_exp = XVECEXP (out_pat, 0, i);
+ if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_set)))
+ return false;
+ }
- if (GET_CODE (out_exp) == CLOBBER)
- continue;
+ return true;
+}
- gcc_assert (GET_CODE (out_exp) == SET);
+/* True if the dependency between OUT_INSN and IN_INSN is on the store
+ data not the address operand(s) of the store. IN_INSN and OUT_INSN
+ must be either a single_set or a PARALLEL with SETs inside. */
- if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_set)))
- return false;
- }
- }
- }
- else
- {
- in_pat = PATTERN (in_insn);
- gcc_assert (GET_CODE (in_pat) == PARALLEL);
+int
+store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
+{
+ rtx in_set = single_set (in_insn);
+ if (in_set)
+ return store_data_bypass_p_1 (out_insn, in_set);
- for (i = 0; i < XVECLEN (in_pat, 0); i++)
- {
- in_exp = XVECEXP (in_pat, 0, i);
+ rtx in_pat = PATTERN (in_insn);
+ if (GET_CODE (in_pat) != PARALLEL)
+ return false;
- if (GET_CODE (in_exp) == CLOBBER)
- continue;
+ for (int i = 0; i < XVECLEN (in_pat, 0); i++)
+ {
+ rtx in_exp = XVECEXP (in_pat, 0, i);
- gcc_assert (GET_CODE (in_exp) == SET);
+ if (GET_CODE (in_exp) == CLOBBER || GET_CODE (in_exp) == USE
+ || GET_CODE (in_exp) == CLOBBER_HIGH)
+ continue;
- if (!MEM_P (SET_DEST (in_exp)))
- return false;
+ gcc_assert (GET_CODE (in_exp) == SET);
- out_set = single_set (out_insn);
- if (out_set)
- {
- if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_exp)))
- return false;
- }
- else
- {
- out_pat = PATTERN (out_insn);
- gcc_assert (GET_CODE (out_pat) == PARALLEL);
-
- for (j = 0; j < XVECLEN (out_pat, 0); j++)
- {
- out_exp = XVECEXP (out_pat, 0, j);
-
- if (GET_CODE (out_exp) == CLOBBER)
- continue;
-
- gcc_assert (GET_CODE (out_exp) == SET);
-
- if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_exp)))
- return false;
- }
- }
- }
+ if (!store_data_bypass_p_1 (out_insn, in_exp))
+ return false;
}
return true;
{
rtx exp = XVECEXP (out_pat, 0, i);
- if (GET_CODE (exp) == CLOBBER)
+ if (GET_CODE (exp) == CLOBBER || GET_CODE (exp) == CLOBBER_HIGH)
continue;
gcc_assert (GET_CODE (exp) == SET);
static unsigned int
rest_of_handle_peephole2 (void)
{
-#ifdef HAVE_peephole2
- peephole2_optimize ();
-#endif
+ if (HAVE_peephole2)
+ peephole2_optimize ();
+
return 0;
}
OPTGROUP_NONE, /* optinfo_flags */
TV_NONE, /* tv_id */
0, /* properties_required */
- 0, /* properties_provided */
+ PROP_rtl_split_insns, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
return new pass_split_all_insns (ctxt);
}
-static unsigned int
-rest_of_handle_split_after_reload (void)
-{
- /* If optimizing, then go ahead and split insns now. */
-#ifndef STACK_REGS
- if (optimize > 0)
-#endif
- split_all_insns ();
- return 0;
-}
-
namespace {
const pass_data pass_data_split_after_reload =
{}
/* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ /* If optimizing, then go ahead and split insns now. */
+ if (optimize > 0)
+ return true;
+
+#ifdef STACK_REGS
+ return true;
+#else
+ return false;
+#endif
+ }
+
virtual unsigned int execute (function *)
{
- return rest_of_handle_split_after_reload ();
+ split_all_insns ();
+ return 0;
}
}; // class pass_split_after_reload
}
memset (this_target_recog->x_bool_attr_masks, 0,
sizeof (this_target_recog->x_bool_attr_masks));
- for (int i = 0; i < LAST_INSN_CODE; ++i)
+ for (unsigned int i = 0; i < NUM_INSN_CODES; ++i)
if (this_target_recog->x_op_alt[i])
{
free (this_target_recog->x_op_alt[i]);