/* Subroutines used by or related to instruction recognition.
- Copyright (C) 1987-2014 Free Software Foundation, Inc.
+ Copyright (C) 1987-2021 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
+#include "rtl.h"
#include "tree.h"
-#include "rtl-error.h"
+#include "cfghooks.h"
+#include "df.h"
+#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
-#include "insn-attr.h"
-#include "hard-reg-set.h"
-#include "recog.h"
#include "regs.h"
+#include "emit-rtl.h"
+#include "recog.h"
+#include "insn-attr.h"
#include "addresses.h"
-#include "expr.h"
-#include "function.h"
-#include "flags.h"
-#include "basic-block.h"
+#include "cfgrtl.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
#include "reload.h"
-#include "target.h"
#include "tree-pass.h"
-#include "df.h"
-#include "insn-codes.h"
-
-#ifndef STACK_PUSH_CODE
-#ifdef STACK_GROWS_DOWNWARD
-#define STACK_PUSH_CODE PRE_DEC
-#else
-#define STACK_PUSH_CODE PRE_INC
-#endif
-#endif
+#include "function-abi.h"
#ifndef STACK_POP_CODE
-#ifdef STACK_GROWS_DOWNWARD
+#if STACK_GROWS_DOWNWARD
#define STACK_POP_CODE POST_INC
#else
#define STACK_POP_CODE POST_DEC
#endif
#endif
-static void validate_replace_rtx_1 (rtx *, rtx, rtx, rtx, bool);
+static void validate_replace_rtx_1 (rtx *, rtx, rtx, rtx_insn *, bool);
static void validate_replace_src_1 (rtx *, void *);
-static rtx split_insn (rtx_insn *);
+static rtx_insn *split_insn (rtx_insn *);
struct target_recog default_target_recog;
#if SWITCHABLE_TARGET
if (reload_completed)
{
/* ??? Doh! We've not got the wrapping insn. Cook one up. */
- extract_insn (make_insn_raw (x));
- constrain_operands (1);
+ rtx_insn *insn = make_insn_raw (x);
+ extract_insn (insn);
+ constrain_operands (1, get_enabled_alternatives (insn));
return which_alternative >= 0;
}
\f
/* Static data for the next two routines.  */
-typedef struct change_t
+struct change_t
{
  rtx object;
  int old_code;
+  /* The previous XVECLEN (*LOC, 0) when the change resized an rtvec,
+     or -1 when the change replaced *LOC wholesale.  */
+  int old_len;
+  /* True if the replacement rtx should be unshared (copied) on reuse.  */
+  bool unshare;
  rtx *loc;
  rtx old;
-  bool unshare;
-} change_t;
+};
static change_t *changes;
static int changes_allocated;
static int num_changes = 0;
+/* The number of trailing changes that temporarily_undo_changes has
+   undone and that redo_changes has not yet reapplied.  */
+static int temporarily_undone_changes = 0;
/* Validate a proposed change to OBJECT. LOC is the location in the rtl
- at which NEW_RTX will be placed. If OBJECT is zero, no validation is done,
- the change is simply made.
+ at which NEW_RTX will be placed. If NEW_LEN is >= 0, XVECLEN (NEW_RTX, 0)
+ will also be changed to NEW_LEN, which is no greater than the current
+ XVECLEN. If OBJECT is zero, no validation is done, the change is
+ simply made.
Two types of objects are supported: If OBJECT is a MEM, memory_address_p
will be called with the address and mode as parameters. If OBJECT is
Otherwise, perform the change and return 1. */
static bool
-validate_change_1 (rtx object, rtx *loc, rtx new_rtx, bool in_group, bool unshare)
+validate_change_1 (rtx object, rtx *loc, rtx new_rtx, bool in_group,
+ bool unshare, int new_len = -1)
{
+ gcc_assert (temporarily_undone_changes == 0);
rtx old = *loc;
- if (old == new_rtx || rtx_equal_p (old, new_rtx))
+ /* Single-element parallels aren't valid and won't match anything.
+ Replace them with the single element. */
+ if (new_len == 1 && GET_CODE (new_rtx) == PARALLEL)
+ {
+ new_rtx = XVECEXP (new_rtx, 0, 0);
+ new_len = -1;
+ }
+
+ if ((old == new_rtx || rtx_equal_p (old, new_rtx))
+ && (new_len < 0 || XVECLEN (new_rtx, 0) == new_len))
return 1;
- gcc_assert (in_group != 0 || num_changes == 0);
+ gcc_assert ((in_group != 0 || num_changes == 0)
+ && (new_len < 0 || new_rtx == *loc));
*loc = new_rtx;
changes[num_changes].object = object;
changes[num_changes].loc = loc;
changes[num_changes].old = old;
+ changes[num_changes].old_len = (new_len >= 0 ? XVECLEN (new_rtx, 0) : -1);
changes[num_changes].unshare = unshare;
+ if (new_len >= 0)
+ XVECLEN (new_rtx, 0) = new_len;
+
if (object && !MEM_P (object))
{
/* Set INSN_CODE to force rerecognition of insn. Save old code in
return validate_change_1 (object, loc, new_rtx, in_group, true);
}
+/* Change XVECLEN (*LOC, 0) to NEW_LEN.  OBJECT, IN_GROUP and the return
+   value are as for validate_change_1.  */
+
+bool
+validate_change_xveclen (rtx object, rtx *loc, int new_len, bool in_group)
+{
+  /* Passing *LOC as the replacement rtx leaves the rtx itself unchanged,
+     so validate_change_1 records only the length adjustment (which it
+     can later undo via the saved old_len).  */
+  return validate_change_1 (object, loc, *loc, in_group, false, new_len);
+}
/* Keep X canonicalized if some changes have made it non-canonical; only
modifies the operands of X, not (for example) its code. Simplifications
Return true if anything was changed. */
bool
-canonicalize_change_group (rtx insn, rtx x)
+canonicalize_change_group (rtx_insn *insn, rtx x)
{
if (COMMUTATIVE_P (x)
&& swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
{
extract_insn (insn);
- if (! constrain_operands (1))
+ if (! constrain_operands (1, get_preferred_alternatives (insn)))
return 1;
}
changes[i].old
&& REG_P (changes[i].old)
&& asm_noperands (PATTERN (object)) > 0
- && REG_EXPR (changes[i].old) != NULL_TREE
- && DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (changes[i].old))
- && DECL_REGISTER (REG_EXPR (changes[i].old)))
+ && register_asm_p (changes[i].old))
{
/* Don't allow changes of hard register operands to inline
assemblies if they have been defined as register asm ("x"). */
int i;
rtx last_object = NULL;
+ gcc_assert (temporarily_undone_changes == 0);
for (i = 0; i < num_changes; i++)
{
rtx object = changes[i].object;
void
cancel_changes (int num)
{
+  gcc_assert (temporarily_undone_changes == 0);
  int i;
  /* Back out all the changes.  Do this in the opposite order in which
     they were made.  */
  for (i = num_changes - 1; i >= num; i--)
    {
-      *changes[i].loc = changes[i].old;
+      /* A vector-length change is undone by restoring the saved XVECLEN;
+	 an ordinary change is undone by restoring the saved rtx.  */
+      if (changes[i].old_len >= 0)
+	XVECLEN (*changes[i].loc, 0) = changes[i].old_len;
+      else
+	*changes[i].loc = changes[i].old;
      if (changes[i].object && !MEM_P (changes[i].object))
	INSN_CODE (changes[i].object) = changes[i].old_code;
    }
  num_changes = num;
}
-/* Reduce conditional compilation elsewhere. */
-#ifndef HAVE_extv
-#define HAVE_extv 0
-#define CODE_FOR_extv CODE_FOR_nothing
-#endif
-#ifndef HAVE_extzv
-#define HAVE_extzv 0
-#define CODE_FOR_extzv CODE_FOR_nothing
-#endif
+/* Swap the status of change NUM from being applied to not being applied,
+   or vice versa.  */
+
+static void
+swap_change (int num)
+{
+  /* Exchanging the live and saved values makes this routine its own
+     inverse, so it serves both to undo and to redo a change.  */
+  if (changes[num].old_len >= 0)
+    std::swap (XVECLEN (*changes[num].loc, 0), changes[num].old_len);
+  else
+    std::swap (*changes[num].loc, changes[num].old);
+  if (changes[num].object && !MEM_P (changes[num].object))
+    std::swap (INSN_CODE (changes[num].object), changes[num].old_code);
+}
+
+/* Temporarily undo all the changes numbered NUM and up, with a view
+   to reapplying them later.  The next call to the changes machinery
+   must be:
+
+   redo_changes (NUM)
+
+   otherwise things will end up in an invalid state.  */
+
+void
+temporarily_undo_changes (int num)
+{
+  gcc_assert (temporarily_undone_changes == 0 && num <= num_changes);
+  /* Undo in reverse order of application, mirroring cancel_changes.  */
+  for (int i = num_changes - 1; i >= num; i--)
+    swap_change (i);
+  temporarily_undone_changes = num_changes - num;
+}
+
+/* Redo the changes that were temporarily undone by:
+
+   temporarily_undo_changes (NUM).  */
+
+void
+redo_changes (int num)
+{
+  gcc_assert (temporarily_undone_changes == num_changes - num);
+  /* Reapply in the original order, the inverse of the undo loop.  */
+  for (int i = num; i < num_changes; ++i)
+    swap_change (i);
+  temporarily_undone_changes = 0;
+}
+
+/* Reduce conditional compilation elsewhere. */
/* A subroutine of validate_replace_rtx_1 that tries to simplify the resulting
rtx. */
static void
-simplify_while_replacing (rtx *loc, rtx to, rtx object,
- enum machine_mode op0_mode)
+simplify_while_replacing (rtx *loc, rtx to, rtx_insn *object,
+ machine_mode op0_mode)
{
rtx x = *loc;
enum rtx_code code = GET_CODE (x);
rtx new_rtx = NULL_RTX;
+ scalar_int_mode is_mode;
if (SWAPPABLE_OPERANDS_P (x)
&& swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
happen, we might just fail in some cases). */
if (MEM_P (XEXP (x, 0))
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &is_mode)
&& CONST_INT_P (XEXP (x, 1))
&& CONST_INT_P (XEXP (x, 2))
&& !mode_dependent_address_p (XEXP (XEXP (x, 0), 0),
MEM_ADDR_SPACE (XEXP (x, 0)))
&& !MEM_VOLATILE_P (XEXP (x, 0)))
{
- enum machine_mode wanted_mode = VOIDmode;
- enum machine_mode is_mode = GET_MODE (XEXP (x, 0));
int pos = INTVAL (XEXP (x, 2));
-
- if (GET_CODE (x) == ZERO_EXTRACT && HAVE_extzv)
- {
- wanted_mode = insn_data[CODE_FOR_extzv].operand[1].mode;
- if (wanted_mode == VOIDmode)
- wanted_mode = word_mode;
- }
- else if (GET_CODE (x) == SIGN_EXTRACT && HAVE_extv)
- {
- wanted_mode = insn_data[CODE_FOR_extv].operand[1].mode;
- if (wanted_mode == VOIDmode)
- wanted_mode = word_mode;
- }
+ machine_mode new_mode = is_mode;
+ if (GET_CODE (x) == ZERO_EXTRACT && targetm.have_extzv ())
+ new_mode = insn_data[targetm.code_for_extzv].operand[1].mode;
+ else if (GET_CODE (x) == SIGN_EXTRACT && targetm.have_extv ())
+ new_mode = insn_data[targetm.code_for_extv].operand[1].mode;
+ scalar_int_mode wanted_mode = (new_mode == VOIDmode
+ ? word_mode
+ : as_a <scalar_int_mode> (new_mode));
/* If we have a narrower mode, we can do something. */
- if (wanted_mode != VOIDmode
- && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ if (GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
{
int offset = pos / BITS_PER_UNIT;
rtx newmem;
validate_change passing OBJECT. */
static void
-validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object,
+validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx_insn *object,
bool simplify)
{
int i, j;
const char *fmt;
rtx x = *loc;
enum rtx_code code;
- enum machine_mode op0_mode = VOIDmode;
+ machine_mode op0_mode = VOIDmode;
int prev_changes = num_changes;
if (!x)
if INSN is still valid. */
int
-validate_replace_rtx_subexp (rtx from, rtx to, rtx insn, rtx *loc)
+validate_replace_rtx_subexp (rtx from, rtx to, rtx_insn *insn, rtx *loc)
{
validate_replace_rtx_1 (loc, from, to, insn, true);
return apply_change_group ();
changes have been made, validate by seeing if INSN is still valid. */
int
-validate_replace_rtx (rtx from, rtx to, rtx insn)
+validate_replace_rtx (rtx from, rtx to, rtx_insn *insn)
{
validate_replace_rtx_1 (&PATTERN (insn), from, to, insn, true);
return apply_change_group ();
validate_replace_rtx_part (from, to, &PATTERN (insn), insn). */
int
-validate_replace_rtx_part (rtx from, rtx to, rtx *where, rtx insn)
+validate_replace_rtx_part (rtx from, rtx to, rtx *where, rtx_insn *insn)
{
validate_replace_rtx_1 (where, from, to, insn, true);
return apply_change_group ();
/* Same as above, but do not simplify rtx afterwards. */
int
validate_replace_rtx_part_nosimplify (rtx from, rtx to, rtx *where,
- rtx insn)
+ rtx_insn *insn)
{
validate_replace_rtx_1 (where, from, to, insn, false);
return apply_change_group ();
will replace in REG_EQUAL and REG_EQUIV notes. */
void
-validate_replace_rtx_group (rtx from, rtx to, rtx insn)
+validate_replace_rtx_group (rtx from, rtx to, rtx_insn *insn)
{
rtx note;
validate_replace_rtx_1 (&PATTERN (insn), from, to, insn, true);
{
rtx from; /* Old RTX */
rtx to; /* New RTX */
- rtx insn; /* Insn in which substitution is occurring. */
+ rtx_insn *insn; /* Insn in which substitution is occurring. */
};
static void
SET_DESTs. */
void
-validate_replace_src_group (rtx from, rtx to, rtx insn)
+validate_replace_src_group (rtx from, rtx to, rtx_insn *insn)
{
struct validate_replace_src_data d;
pattern and return true if something was simplified. */
bool
-validate_simplify_insn (rtx insn)
+validate_simplify_insn (rtx_insn *insn)
{
int i;
rtx pat = NULL;
}
return ((num_changes_pending () > 0) && (apply_change_group () > 0));
}
-\f
-#ifdef HAVE_cc0
-/* Return 1 if the insn using CC0 set by INSN does not contain
- any ordered tests applied to the condition codes.
- EQ and NE tests do not count. */
-int
-next_insn_tests_no_inequality (rtx insn)
+/* Try to process the address of memory expression MEM.  Return true on
+   success; leave the caller to clean up on failure.  */
+
+bool
+insn_propagation::apply_to_mem_1 (rtx mem)
{
-  rtx next = next_cc0_user (insn);
+  auto old_num_changes = num_validated_changes ();
+  /* mem_depth tells the recursive rvalue walk that it is inside an
+     address, where the canonical form of expressions differs.  */
+  mem_depth += 1;
+  bool res = apply_to_rvalue_1 (&XEXP (mem, 0));
+  mem_depth -= 1;
+  if (!res)
+    return false;
-  /* If there is no next insn, we have to take the conservative choice.  */
-  if (next == 0)
-    return 0;
+  /* Only re-validate the MEM if the walk actually changed its address.  */
+  if (old_num_changes != num_validated_changes ()
+      && should_check_mems
+      && !check_mem (old_num_changes, mem))
+    return false;
-  return (INSN_P (next)
-	  && ! inequality_comparisons_p (PATTERN (next)));
+  return true;
}
-#endif
-\f
-/* Return 1 if OP is a valid general operand for machine mode MODE.
+
+/* Try to process the rvalue expression at *LOC.  Return true on success;
+   leave the caller to clean up on failure.  */
+
+bool
+insn_propagation::apply_to_rvalue_1 (rtx *loc)
+{
+  rtx x = *loc;
+  enum rtx_code code = GET_CODE (x);
+  machine_mode mode = GET_MODE (x);
+
+  auto old_num_changes = num_validated_changes ();
+  /* An exact match for FROM: queue a replacement of *LOC with TO.  */
+  if (from && GET_CODE (x) == GET_CODE (from) && rtx_equal_p (x, from))
+    {
+      /* Don't replace register asms in asm statements; we mustn't
+	 change the user's register allocation.  */
+      if (REG_P (x)
+	  && HARD_REGISTER_P (x)
+	  && register_asm_p (x)
+	  && asm_noperands (PATTERN (insn)) > 0)
+	return false;
+
+      if (should_unshare)
+	validate_unshare_change (insn, loc, to, 1);
+      else
+	validate_change (insn, loc, to, 1);
+      if (mem_depth && !REG_P (to) && !CONSTANT_P (to))
+	{
+	  /* We're substituting into an address, but TO will have the
+	     form expected outside an address.  Canonicalize it if
+	     necessary.  */
+	  insn_propagation subprop (insn);
+	  subprop.mem_depth += 1;
+	  if (!subprop.apply_to_rvalue (loc))
+	    gcc_unreachable ();
+	  if (should_unshare
+	      && num_validated_changes () != old_num_changes + 1)
+	    {
+	      /* TO is owned by someone else, so create a copy and
+		 return TO to its original form.  */
+	      rtx to = copy_rtx (*loc);
+	      cancel_changes (old_num_changes);
+	      validate_change (insn, loc, to, 1);
+	    }
+	}
+      num_replacements += 1;
+      should_unshare = true;
+      result_flags |= UNSIMPLIFIED;
+      return true;
+    }
+
+  /* Recursively apply the substitution and see if we can simplify
+     the result.  This specifically shouldn't use simplify_gen_* for
+     speculative simplifications, since we want to avoid generating new
+     expressions where possible.  */
+  auto old_result_flags = result_flags;
+  rtx newx = NULL_RTX;
+  bool recurse_p = false;
+  switch (GET_RTX_CLASS (code))
+    {
+    case RTX_UNARY:
+      {
+	machine_mode op0_mode = GET_MODE (XEXP (x, 0));
+	if (!apply_to_rvalue_1 (&XEXP (x, 0)))
+	  return false;
+	/* Nothing substituted below here, so there is nothing to
+	   simplify; the same early exit applies in each class below.  */
+	if (from && old_num_changes == num_validated_changes ())
+	  return true;
+
+	newx = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
+	break;
+      }
+
+    case RTX_BIN_ARITH:
+    case RTX_COMM_ARITH:
+      {
+	if (!apply_to_rvalue_1 (&XEXP (x, 0))
+	    || !apply_to_rvalue_1 (&XEXP (x, 1)))
+	  return false;
+	if (from && old_num_changes == num_validated_changes ())
+	  return true;
+
+	if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
+	    && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
+	  newx = simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
+	else
+	  newx = simplify_binary_operation (code, mode,
+					    XEXP (x, 0), XEXP (x, 1));
+	break;
+      }
+
+    case RTX_COMPARE:
+    case RTX_COMM_COMPARE:
+      {
+	machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
+				? GET_MODE (XEXP (x, 0))
+				: GET_MODE (XEXP (x, 1)));
+	if (!apply_to_rvalue_1 (&XEXP (x, 0))
+	    || !apply_to_rvalue_1 (&XEXP (x, 1)))
+	  return false;
+	if (from && old_num_changes == num_validated_changes ())
+	  return true;
+
+	newx = simplify_relational_operation (code, mode, op_mode,
+					      XEXP (x, 0), XEXP (x, 1));
+	break;
+      }
+
+    case RTX_TERNARY:
+    case RTX_BITFIELD_OPS:
+      {
+	machine_mode op0_mode = GET_MODE (XEXP (x, 0));
+	if (!apply_to_rvalue_1 (&XEXP (x, 0))
+	    || !apply_to_rvalue_1 (&XEXP (x, 1))
+	    || !apply_to_rvalue_1 (&XEXP (x, 2)))
+	  return false;
+	if (from && old_num_changes == num_validated_changes ())
+	  return true;
+
+	newx = simplify_ternary_operation (code, mode, op0_mode,
+					   XEXP (x, 0), XEXP (x, 1),
+					   XEXP (x, 2));
+	break;
+      }
+
+    case RTX_EXTRA:
+      if (code == SUBREG)
+	{
+	  machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
+	  if (!apply_to_rvalue_1 (&SUBREG_REG (x)))
+	    return false;
+	  if (from && old_num_changes == num_validated_changes ())
+	    return true;
+
+	  rtx inner = SUBREG_REG (x);
+	  newx = simplify_subreg (mode, inner, inner_mode, SUBREG_BYTE (x));
+	  /* Reject the same cases that simplify_gen_subreg would.  */
+	  if (!newx
+	      && (GET_CODE (inner) == SUBREG
+		  || GET_CODE (inner) == CONCAT
+		  || GET_MODE (inner) == VOIDmode
+		  || !validate_subreg (mode, inner_mode,
+				       inner, SUBREG_BYTE (x))))
+	    {
+	      failure_reason = "would create an invalid subreg";
+	      return false;
+	    }
+	  break;
+	}
+      else
+	recurse_p = true;
+      break;
+
+    case RTX_OBJ:
+      if (code == LO_SUM)
+	{
+	  if (!apply_to_rvalue_1 (&XEXP (x, 0))
+	      || !apply_to_rvalue_1 (&XEXP (x, 1)))
+	    return false;
+	  if (from && old_num_changes == num_validated_changes ())
+	    return true;
+
+	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
+	  rtx op0 = XEXP (x, 0);
+	  rtx op1 = XEXP (x, 1);
+	  if (GET_CODE (op0) == HIGH)
+	    {
+	      rtx base0, base1, offset0, offset1;
+	      split_const (XEXP (op0, 0), &base0, &offset0);
+	      split_const (op1, &base1, &offset1);
+	      if (rtx_equal_p (base0, base1))
+		newx = op1;
+	    }
+	}
+      else if (code == REG)
+	{
+	  /* A register that overlaps FROM without being equal to it
+	     cannot be substituted exactly.  */
+	  if (from && REG_P (from) && reg_overlap_mentioned_p (x, from))
+	    {
+	      failure_reason = "inexact register overlap";
+	      return false;
+	    }
+	}
+      else if (code == MEM)
+	return apply_to_mem_1 (x);
+      else
+	recurse_p = true;
+      break;
+
+    case RTX_CONST_OBJ:
+      break;
+
+    case RTX_AUTOINC:
+      if (from && reg_overlap_mentioned_p (XEXP (x, 0), from))
+	{
+	  failure_reason = "is subject to autoinc";
+	  return false;
+	}
+      recurse_p = true;
+      break;
+
+    case RTX_MATCH:
+    case RTX_INSN:
+      gcc_unreachable ();
+    }
+
+  if (recurse_p)
+    {
+      /* Generic format-string walk for codes with no special handling.  */
+      const char *fmt = GET_RTX_FORMAT (code);
+      for (int i = 0; fmt[i]; i++)
+	switch (fmt[i])
+	  {
+	  case 'E':
+	    for (int j = 0; j < XVECLEN (x, i); j++)
+	      if (!apply_to_rvalue_1 (&XVECEXP (x, i, j)))
+		return false;
+	    break;
+
+	  case 'e':
+	    if (XEXP (x, i) && !apply_to_rvalue_1 (&XEXP (x, i)))
+	      return false;
+	    break;
+	  }
+    }
+  else if (newx && !rtx_equal_p (x, newx))
+    {
+      /* All substitutions made by OLD_NUM_CHANGES onwards have been
+	 simplified.  */
+      result_flags = ((result_flags & ~UNSIMPLIFIED)
+		      | (old_result_flags & UNSIMPLIFIED));
+
+      if (should_note_simplifications)
+	note_simplification (old_num_changes, old_result_flags, x, newx);
+
+      /* There's no longer any point unsharing the substitutions made
+	 for subexpressions, since we'll just copy this one instead.  */
+      bool unshare = false;
+      for (int i = old_num_changes; i < num_changes; ++i)
+	{
+	  unshare |= changes[i].unshare;
+	  changes[i].unshare = false;
+	}
+      if (unshare)
+	validate_unshare_change (insn, loc, newx, 1);
+      else
+	validate_change (insn, loc, newx, 1);
+    }
+
+  return true;
+}
+
+/* Try to process the lvalue expression at *LOC.  Return true on success;
+   leave the caller to clean up on failure.  */
+
+bool
+insn_propagation::apply_to_lvalue_1 (rtx dest)
+{
+  rtx old_dest = dest;
+  /* Strip wrappers to reach the register or memory actually written.
+     The position and width operands of a ZERO_EXTRACT are rvalues and
+     are processed as such.  */
+  while (GET_CODE (dest) == SUBREG
+	 || GET_CODE (dest) == ZERO_EXTRACT
+	 || GET_CODE (dest) == STRICT_LOW_PART)
+    {
+      if (GET_CODE (dest) == ZERO_EXTRACT
+	  && (!apply_to_rvalue_1 (&XEXP (dest, 1))
+	      || !apply_to_rvalue_1 (&XEXP (dest, 2))))
+	return false;
+      dest = XEXP (dest, 0);
+    }
+
+  if (MEM_P (dest))
+    return apply_to_mem_1 (dest);
+
+  /* Check whether the substitution is safe in the presence of this lvalue.  */
+  if (!from
+      || dest == old_dest
+      || !REG_P (dest)
+      || !reg_overlap_mentioned_p (dest, from))
+    return true;
+
+  /* A write to a whole subreg of FROM's register is safe when it does
+     not preserve any of the register's old contents.  */
+  if (SUBREG_P (old_dest)
+      && SUBREG_REG (old_dest) == dest
+      && !read_modify_subreg_p (old_dest))
+    return true;
+
+  failure_reason = "is part of a read-write destination";
+  return false;
+}
+
+/* Try to process the instruction pattern at *LOC.  Return true on success;
+   leave the caller to clean up on failure.  */
+
+bool
+insn_propagation::apply_to_pattern_1 (rtx *loc)
+{
+  rtx body = *loc;
+  /* Dispatch on the pattern's top-level code so that lvalues and
+     rvalues are processed by the appropriate routine.  */
+  switch (GET_CODE (body))
+    {
+    case COND_EXEC:
+      return (apply_to_rvalue_1 (&COND_EXEC_TEST (body))
+	      && apply_to_pattern_1 (&COND_EXEC_CODE (body)));
+
+    case PARALLEL:
+      {
+	int last = XVECLEN (body, 0) - 1;
+	for (int i = 0; i < last; ++i)
+	  if (!apply_to_pattern_1 (&XVECEXP (body, 0, i)))
+	    return false;
+	return apply_to_pattern_1 (&XVECEXP (body, 0, last));
+      }
+
+    case ASM_OPERANDS:
+      for (int i = 0, len = ASM_OPERANDS_INPUT_LENGTH (body); i < len; ++i)
+	if (!apply_to_rvalue_1 (&ASM_OPERANDS_INPUT (body, i)))
+	  return false;
+      return true;
+
+    case CLOBBER:
+      return apply_to_lvalue_1 (XEXP (body, 0));
+
+    case SET:
+      return (apply_to_lvalue_1 (SET_DEST (body))
+	      && apply_to_rvalue_1 (&SET_SRC (body)));
+
+    default:
+      /* All the other possibilities never store and can use a normal
+	 rtx walk.  This includes:
+
+	 - USE
+	 - TRAP_IF
+	 - PREFETCH
+	 - UNSPEC
+	 - UNSPEC_VOLATILE.  */
+      return apply_to_rvalue_1 (loc);
+    }
+}
+
+/* Apply this insn_propagation object's simplification or substitution
+   to the instruction pattern at LOC.  */
+
+bool
+insn_propagation::apply_to_pattern (rtx *loc)
+{
+  unsigned int num_changes = num_validated_changes ();
+  bool res = apply_to_pattern_1 (loc);
+  /* On failure, roll back every change queued after entry so the
+     pattern is left exactly as it was.  */
+  if (!res)
+    cancel_changes (num_changes);
+  return res;
+}
+
+/* Apply this insn_propagation object's simplification or substitution
+   to the rvalue expression at LOC.  */
+
+bool
+insn_propagation::apply_to_rvalue (rtx *loc)
+{
+  unsigned int num_changes = num_validated_changes ();
+  bool res = apply_to_rvalue_1 (loc);
+  /* On failure, roll back every change queued after entry so the
+     expression is left exactly as it was.  */
+  if (!res)
+    cancel_changes (num_changes);
+  return res;
+}
+
+/* Check whether INSN matches a specific alternative of an .md pattern.  */
+
+bool
+valid_insn_p (rtx_insn *insn)
+{
+  /* recog_memoized caches the pattern number in INSN_CODE; a negative
+     code means no pattern matched.  */
+  recog_memoized (insn);
+  if (INSN_CODE (insn) < 0)
+    return false;
+  extract_insn (insn);
+  /* We don't know whether the insn will be in code that is optimized
+     for size or speed, so consider all enabled alternatives.  */
+  if (!constrain_operands (1, get_enabled_alternatives (insn)))
+    return false;
+  return true;
+}
+
+/* Return true if OP is a valid general operand for machine mode MODE.
This is either a register reference, a memory reference,
or a constant. In the case of a memory reference, the address
is checked for general validity for the target machine.
The main use of this function is as a predicate in match_operand
expressions in the machine description. */
-int
-general_operand (rtx op, enum machine_mode mode)
+bool
+general_operand (rtx op, machine_mode mode)
{
enum rtx_code code = GET_CODE (op);
if (GET_MODE (op) == VOIDmode && mode != VOIDmode
&& GET_MODE_CLASS (mode) != MODE_INT
&& GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
- return 0;
+ return false;
if (CONST_INT_P (op)
&& mode != VOIDmode
&& trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op))
- return 0;
+ return false;
if (CONSTANT_P (op))
return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode
OP's mode must match MODE if MODE specifies a mode. */
if (GET_MODE (op) != mode)
- return 0;
+ return false;
if (code == SUBREG)
{
However, we must allow them after reload so that they can
get cleaned up by cleanup_subreg_operands. */
if (!reload_completed && MEM_P (sub)
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (sub)))
- return 0;
+ && paradoxical_subreg_p (op))
+ return false;
#endif
/* Avoid memories with nonzero SUBREG_BYTE, as offsetting the memory
may result in incorrect reference. We should simplify all valid
might be called from cleanup_subreg_operands.
??? This is a kludge. */
- if (!reload_completed && SUBREG_BYTE (op) != 0
+ if (!reload_completed
+ && maybe_ne (SUBREG_BYTE (op), 0)
&& MEM_P (sub))
- return 0;
+ return false;
-#ifdef CANNOT_CHANGE_MODE_CLASS
if (REG_P (sub)
&& REGNO (sub) < FIRST_PSEUDO_REGISTER
- && REG_CANNOT_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
+ && !REG_CAN_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
&& GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_INT
&& GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_FLOAT
/* LRA can generate some invalid SUBREGS just for matched
operand reload presentation. LRA needs to treat them as
valid. */
&& ! LRA_SUBREG_P (op))
- return 0;
-#endif
+ return false;
/* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally
create such rtl, and we must reject it. */
size of floating point mode can be less than the integer
mode. */
&& ! lra_in_progress
- && GET_MODE_SIZE (GET_MODE (op)) > GET_MODE_SIZE (GET_MODE (sub)))
- return 0;
+ && paradoxical_subreg_p (op))
+ return false;
op = sub;
code = GET_CODE (op);
rtx y = XEXP (op, 0);
if (! volatile_ok && MEM_VOLATILE_P (op))
- return 0;
+ return false;
/* Use the mem's mode, since it will be reloaded thus. LRA can
generate move insn with invalid addresses which is made valid
transformations. */
if (lra_in_progress
|| memory_address_addr_space_p (GET_MODE (op), y, MEM_ADDR_SPACE (op)))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
\f
-/* Return 1 if OP is a valid memory address for a memory reference
+/* Return true if OP is a valid memory address for a memory reference
   of mode MODE.
   The main use of this function is as a predicate in match_operand
   expressions in the machine description.  */
-int
-address_operand (rtx op, enum machine_mode mode)
+bool
+address_operand (rtx op, machine_mode mode)
{
+  /* Wrong mode for an address expr.  */
+  if (GET_MODE (op) != VOIDmode
+      && ! SCALAR_INT_MODE_P (GET_MODE (op)))
+    return false;
+
+  /* VOIDmode is allowed through: constants have no mode of their own.  */
  return memory_address_p (mode, op);
}
-/* Return 1 if OP is a register reference of mode MODE.
+/* Return true if OP is a register reference of mode MODE.
If MODE is VOIDmode, accept a register in any mode.
The main use of this function is as a predicate in match_operand
expressions in the machine description. */
-int
-register_operand (rtx op, enum machine_mode mode)
+bool
+register_operand (rtx op, machine_mode mode)
{
if (GET_CODE (op) == SUBREG)
{
but currently it does result from (SUBREG (REG)...) where the
reg went on the stack.) */
if (!REG_P (sub) && (reload_completed || !MEM_P (sub)))
- return 0;
+ return false;
}
else if (!REG_P (op))
- return 0;
+ return false;
return general_operand (op, mode);
}
-/* Return 1 for a register in Pmode; ignore the tested mode.  */
+/* Return true for a register in Pmode; ignore the tested mode.  */
-int
-pmode_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+bool
+pmode_register_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
+  /* MODE is deliberately ignored: the operand must be in the target's
+     pointer mode regardless of what the pattern asked for.  */
  return register_operand (op, Pmode);
}
-/* Return 1 if OP should match a MATCH_SCRATCH, i.e., if it is a SCRATCH
+/* Return true if OP should match a MATCH_SCRATCH, i.e., if it is a SCRATCH
   or a hard register.  */
-int
-scratch_operand (rtx op, enum machine_mode mode)
+bool
+scratch_operand (rtx op, machine_mode mode)
{
  if (GET_MODE (op) != mode && mode != VOIDmode)
-    return 0;
+    return false;
  return (GET_CODE (op) == SCRATCH
	  || (REG_P (op)
-	      && (lra_in_progress || REGNO (op) < FIRST_PSEUDO_REGISTER)));
+	      && (lra_in_progress
+		  /* Hard registers in NO_REGS cannot serve as scratch
+		     registers, so exclude them here.  */
+		  || (REGNO (op) < FIRST_PSEUDO_REGISTER
+		      && REGNO_REG_CLASS (REGNO (op)) != NO_REGS))));
}
-/* Return 1 if OP is a valid immediate operand for mode MODE.
+/* Return true if OP is a valid immediate operand for mode MODE.
The main use of this function is as a predicate in match_operand
expressions in the machine description. */
-int
-immediate_operand (rtx op, enum machine_mode mode)
+bool
+immediate_operand (rtx op, machine_mode mode)
{
/* Don't accept CONST_INT or anything similar
if the caller wants something floating. */
if (GET_MODE (op) == VOIDmode && mode != VOIDmode
&& GET_MODE_CLASS (mode) != MODE_INT
&& GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
- return 0;
+ return false;
if (CONST_INT_P (op)
&& mode != VOIDmode
&& trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op))
- return 0;
+ return false;
return (CONSTANT_P (op)
&& (GET_MODE (op) == mode || mode == VOIDmode
: mode, op));
}
-/* Returns 1 if OP is an operand that is a CONST_INT of mode MODE.  */
+/* Return true if OP is an operand that is a CONST_INT of mode MODE.  */
-int
-const_int_operand (rtx op, enum machine_mode mode)
+bool
+const_int_operand (rtx op, machine_mode mode)
{
  if (!CONST_INT_P (op))
-    return 0;
+    return false;
+  /* Reject values that would not survive truncation to MODE,
+     i.e. that are not already in MODE's canonical form.  */
  if (mode != VOIDmode
      && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op))
-    return 0;
+    return false;
-  return 1;
+  return true;
}
#if TARGET_SUPPORTS_WIDE_INT
-/* Returns 1 if OP is an operand that is a CONST_INT or CONST_WIDE_INT
+/* Return true if OP is an operand that is a CONST_INT or CONST_WIDE_INT
of mode MODE. */
-int
-const_scalar_int_operand (rtx op, enum machine_mode mode)
+bool
+const_scalar_int_operand (rtx op, machine_mode mode)
{
if (!CONST_SCALAR_INT_P (op))
- return 0;
+ return false;
if (CONST_INT_P (op))
return const_int_operand (op, mode);
if (mode != VOIDmode)
{
- int prec = GET_MODE_PRECISION (mode);
- int bitsize = GET_MODE_BITSIZE (mode);
+ scalar_int_mode int_mode = as_a <scalar_int_mode> (mode);
+ int prec = GET_MODE_PRECISION (int_mode);
+ int bitsize = GET_MODE_BITSIZE (int_mode);
if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize)
- return 0;
+ return false;
if (prec == bitsize)
- return 1;
+ return true;
else
{
/* Multiword partial int. */
return (sext_hwi (x, prec & (HOST_BITS_PER_WIDE_INT - 1)) == x);
}
}
- return 1;
+ return true;
}
-/* Returns 1 if OP is an operand that is a constant integer or constant
+/* Return true if OP is an operand that is a constant integer or constant
   floating-point number of MODE.  */
-int
-const_double_operand (rtx op, enum machine_mode mode)
+bool
+const_double_operand (rtx op, machine_mode mode)
{
+  /* In this configuration large integers use CONST_WIDE_INT, so a
+     CONST_DOUBLE check alone suffices here.  */
  return (GET_CODE (op) == CONST_DOUBLE)
	  && (GET_MODE (op) == mode || mode == VOIDmode);
}
#else
-/* Returns 1 if OP is an operand that is a constant integer or constant
+/* Return true if OP is an operand that is a constant integer or constant
   floating-point number of MODE.  */
-int
-const_double_operand (rtx op, enum machine_mode mode)
+bool
+const_double_operand (rtx op, machine_mode mode)
{
  /* Don't accept CONST_INT or anything similar
     if the caller wants something floating.  */
  if (GET_MODE (op) == VOIDmode && mode != VOIDmode
      && GET_MODE_CLASS (mode) != MODE_INT
      && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
-    return 0;
+    return false;
+  /* Without wide-int support, CONST_DOUBLE may also carry integers.  */
  return ((CONST_DOUBLE_P (op) || CONST_INT_P (op))
	  && (mode == VOIDmode || GET_MODE (op) == mode
	      || GET_MODE (op) == VOIDmode));
}
#endif
-/* Return 1 if OP is a general operand that is not an immediate
+/* Return true if OP is a general operand that is not an immediate
operand of mode MODE. */
-int
-nonimmediate_operand (rtx op, enum machine_mode mode)
+bool
+nonimmediate_operand (rtx op, machine_mode mode)
{
return (general_operand (op, mode) && ! CONSTANT_P (op));
}
-/* Return 1 if OP is a register reference or immediate value of mode MODE. */
+/* Return true if OP is a register reference or
+ immediate value of mode MODE. */
-int
-nonmemory_operand (rtx op, enum machine_mode mode)
+bool
+nonmemory_operand (rtx op, machine_mode mode)
{
if (CONSTANT_P (op))
return immediate_operand (op, mode);
return register_operand (op, mode);
}
-/* Return 1 if OP is a valid operand that stands for pushing a
+/* Return true if OP is a valid operand that stands for pushing a
value of mode MODE onto the stack.
The main use of this function is as a predicate in match_operand
expressions in the machine description. */
-int
-push_operand (rtx op, enum machine_mode mode)
+bool
+push_operand (rtx op, machine_mode mode)
{
- unsigned int rounded_size = GET_MODE_SIZE (mode);
-
-#ifdef PUSH_ROUNDING
- rounded_size = PUSH_ROUNDING (rounded_size);
-#endif
-
if (!MEM_P (op))
- return 0;
+ return false;
if (mode != VOIDmode && GET_MODE (op) != mode)
- return 0;
+ return false;
+
+ poly_int64 rounded_size = GET_MODE_SIZE (mode);
+
+#ifdef PUSH_ROUNDING
+ rounded_size = PUSH_ROUNDING (MACRO_INT (rounded_size));
+#endif
op = XEXP (op, 0);
- if (rounded_size == GET_MODE_SIZE (mode))
+ if (known_eq (rounded_size, GET_MODE_SIZE (mode)))
{
if (GET_CODE (op) != STACK_PUSH_CODE)
- return 0;
+ return false;
}
else
{
+ poly_int64 offset;
if (GET_CODE (op) != PRE_MODIFY
|| GET_CODE (XEXP (op, 1)) != PLUS
|| XEXP (XEXP (op, 1), 0) != XEXP (op, 0)
- || !CONST_INT_P (XEXP (XEXP (op, 1), 1))
-#ifdef STACK_GROWS_DOWNWARD
- || INTVAL (XEXP (XEXP (op, 1), 1)) != - (int) rounded_size
-#else
- || INTVAL (XEXP (XEXP (op, 1), 1)) != (int) rounded_size
-#endif
- )
- return 0;
+ || !poly_int_rtx_p (XEXP (XEXP (op, 1), 1), &offset)
+ || (STACK_GROWS_DOWNWARD
+ ? maybe_ne (offset, -rounded_size)
+ : maybe_ne (offset, rounded_size)))
+ return false;
}
return XEXP (op, 0) == stack_pointer_rtx;
}
-/* Return 1 if OP is a valid operand that stands for popping a
+/* Return true if OP is a valid operand that stands for popping a
value of mode MODE off the stack.
The main use of this function is as a predicate in match_operand
expressions in the machine description. */
-int
-pop_operand (rtx op, enum machine_mode mode)
+bool
+pop_operand (rtx op, machine_mode mode)
{
if (!MEM_P (op))
- return 0;
+ return false;
if (mode != VOIDmode && GET_MODE (op) != mode)
- return 0;
+ return false;
op = XEXP (op, 0);
if (GET_CODE (op) != STACK_POP_CODE)
- return 0;
+ return false;
return XEXP (op, 0) == stack_pointer_rtx;
}
-/* Return 1 if ADDR is a valid memory address
+/* Return true if ADDR is a valid memory address
for mode MODE in address space AS. */
-int
-memory_address_addr_space_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+bool
+memory_address_addr_space_p (machine_mode mode ATTRIBUTE_UNUSED,
rtx addr, addr_space_t as)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
gcc_assert (ADDR_SPACE_GENERIC_P (as));
GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
- return 0;
+ return false;
win:
- return 1;
+ return true;
#else
return targetm.addr_space.legitimate_address_p (mode, addr, 0, as);
#endif
}
-/* Return 1 if OP is a valid memory reference with mode MODE,
+/* Return true if OP is a valid memory reference with mode MODE,
including a valid address.
The main use of this function is as a predicate in match_operand
expressions in the machine description. */
-int
-memory_operand (rtx op, enum machine_mode mode)
+bool
+memory_operand (rtx op, machine_mode mode)
{
rtx inner;
return MEM_P (op) && general_operand (op, mode);
if (mode != VOIDmode && GET_MODE (op) != mode)
- return 0;
+ return false;
inner = op;
if (GET_CODE (inner) == SUBREG)
return (MEM_P (inner) && general_operand (op, mode));
}
-/* Return 1 if OP is a valid indirect memory reference with mode MODE;
+/* Return true if OP is a valid indirect memory reference with mode MODE;
that is, a memory reference whose address is a general_operand. */
-int
-indirect_operand (rtx op, enum machine_mode mode)
+bool
+indirect_operand (rtx op, machine_mode mode)
{
/* Before reload, a SUBREG isn't in memory (see memory_operand, above). */
if (! reload_completed
&& GET_CODE (op) == SUBREG && MEM_P (SUBREG_REG (op)))
{
- int offset = SUBREG_BYTE (op);
- rtx inner = SUBREG_REG (op);
-
if (mode != VOIDmode && GET_MODE (op) != mode)
- return 0;
+ return false;
/* The only way that we can have a general_operand as the resulting
address is if OFFSET is zero and the address already is an operand
or if the address is (plus Y (const_int -OFFSET)) and Y is an
operand. */
-
- return ((offset == 0 && general_operand (XEXP (inner, 0), Pmode))
- || (GET_CODE (XEXP (inner, 0)) == PLUS
- && CONST_INT_P (XEXP (XEXP (inner, 0), 1))
- && INTVAL (XEXP (XEXP (inner, 0), 1)) == -offset
- && general_operand (XEXP (XEXP (inner, 0), 0), Pmode)));
+ poly_int64 offset;
+ rtx addr = strip_offset (XEXP (SUBREG_REG (op), 0), &offset);
+ return (known_eq (offset + SUBREG_BYTE (op), 0)
+ && general_operand (addr, Pmode));
}
return (MEM_P (op)
&& general_operand (XEXP (op, 0), Pmode));
}
-/* Return 1 if this is an ordered comparison operator (not including
+/* Return true if this is an ordered comparison operator (not including
ORDERED and UNORDERED). */
-int
-ordered_comparison_operator (rtx op, enum machine_mode mode)
+bool
+ordered_comparison_operator (rtx op, machine_mode mode)
{
if (mode != VOIDmode && GET_MODE (op) != mode)
return false;
}
}
-/* Return 1 if this is a comparison operator. This allows the use of
+/* Return true if this is a comparison operator. This allows the use of
MATCH_OPERATOR to recognize all the branch insns. */
-int
-comparison_operator (rtx op, enum machine_mode mode)
+bool
+comparison_operator (rtx op, machine_mode mode)
{
return ((mode == VOIDmode || GET_MODE (op) == mode)
&& COMPARISON_P (op));
/* If BODY is an insn body that uses ASM_OPERANDS,
return the number of operands (both input and output) in the insn.
+ If BODY is an insn body that uses ASM_INPUT with CLOBBERS in PARALLEL,
+ return 0.
Otherwise return -1. */
int
asm_noperands (const_rtx body)
{
rtx asm_op = extract_asm_operands (CONST_CAST_RTX (body));
- int n_sets = 0;
+ int i, n_sets = 0;
if (asm_op == NULL)
- return -1;
+ {
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) >= 2
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_INPUT)
+ {
+ /* body is [(asm_input ...) (clobber (reg ...))...]. */
+ for (i = XVECLEN (body, 0) - 1; i > 0; i--)
+ if (GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
+ return -1;
+ return 0;
+ }
+ return -1;
+ }
if (GET_CODE (body) == SET)
n_sets = 1;
else if (GET_CODE (body) == PARALLEL)
{
- int i;
if (GET_CODE (XVECEXP (body, 0, 0)) == SET)
{
/* Multiple output operands, or 1 output plus some clobbers:
the locations of the operands within the insn into the vector OPERAND_LOCS,
and the constraints for the operands into CONSTRAINTS.
Write the modes of the operands into MODES.
+ Write the location info into LOC.
Return the assembler-template.
+ If BODY is an insn body that uses ASM_INPUT with CLOBBERS in PARALLEL,
+ return the basic assembly string.
- If MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
+ If LOC, MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
we don't store that info. */
const char *
decode_asm_operands (rtx body, rtx *operands, rtx **operand_locs,
- const char **constraints, enum machine_mode *modes,
+ const char **constraints, machine_mode *modes,
location_t *loc)
{
int nbase = 0, n, i;
{
if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
break; /* Past last SET */
+ gcc_assert (GET_CODE (XVECEXP (body, 0, i)) == SET);
if (operands)
operands[i] = SET_DEST (XVECEXP (body, 0, i));
if (operand_locs)
}
nbase = i;
}
+ else if (GET_CODE (asmop) == ASM_INPUT)
+ {
+ if (loc)
+ *loc = ASM_INPUT_SOURCE_LOCATION (asmop);
+ return XSTR (asmop, 0);
+ }
break;
}
asm_operand_ok (rtx op, const char *constraint, const char **constraints)
{
int result = 0;
-#ifdef AUTO_INC_DEC
bool incdec_ok = false;
-#endif
/* Use constrain_operands after reload. */
gcc_assert (!reload_completed);
result = 1;
break;
-#ifdef AUTO_INC_DEC
case '<':
case '>':
/* ??? Before auto-inc-dec, auto inc/dec insns are not supposed
Match any memory and hope things are resolved after reload. */
incdec_ok = true;
-#endif
+ /* FALLTHRU */
default:
cn = lookup_constraint (constraint);
+ rtx mem = NULL;
switch (get_constraint_type (cn))
{
case CT_REGISTER:
break;
case CT_MEMORY:
+ case CT_RELAXED_MEMORY:
+ mem = op;
+ /* Fall through. */
+ case CT_SPECIAL_MEMORY:
/* Every memory operand can be reloaded to fit. */
- result = result || memory_operand (op, VOIDmode);
+ if (!mem)
+ mem = extract_mem_from_operand (op);
+ result = result || memory_operand (mem, VOIDmode);
break;
case CT_ADDRESS:
len = CONSTRAINT_LEN (c, constraint);
do
constraint++;
- while (--len && *constraint);
+ while (--len && *constraint && *constraint != ',');
if (len)
return 0;
}
-#ifdef AUTO_INC_DEC
/* For operands without < or > constraints reject side-effects. */
- if (!incdec_ok && result && MEM_P (op))
+ if (AUTO_INC_DEC && !incdec_ok && result && MEM_P (op))
switch (GET_CODE (XEXP (op, 0)))
{
case PRE_INC:
default:
break;
}
-#endif
return result;
}
return 0;
}
\f
-/* Return 1 if OP is a memory reference
- whose address contains no side effects
- and remains valid after the addition
- of a positive integer less than the
- size of the object being referenced.
+/* Return true if OP is a memory reference whose address contains
+ no side effects and remains valid after the addition of a positive
+ integer less than the size of the object being referenced.
We assume that the original address is valid and do not check it.
This uses strict_memory_address_p as a subroutine, so
don't use it before reload. */
-int
+bool
offsettable_memref_p (rtx op)
{
return ((MEM_P (op))
/* Similar, but don't require a strictly valid mem ref:
consider pseudo-regs valid as index or base regs. */
-int
+bool
offsettable_nonstrict_memref_p (rtx op)
{
return ((MEM_P (op))
MEM_ADDR_SPACE (op)));
}
-/* Return 1 if Y is a memory address which contains no side effects
+/* Return true if Y is a memory address which contains no side effects
and would remain valid for address space AS after the addition of
a positive integer less than the size of that mode.
If STRICTP is nonzero, we require a strictly valid address,
for the sake of use in reload.c. */
-int
-offsettable_address_addr_space_p (int strictp, enum machine_mode mode, rtx y,
+bool
+offsettable_address_addr_space_p (int strictp, machine_mode mode, rtx y,
addr_space_t as)
{
enum rtx_code ycode = GET_CODE (y);
rtx z;
rtx y1 = y;
rtx *y2;
- int (*addressp) (enum machine_mode, rtx, addr_space_t) =
+ bool (*addressp) (machine_mode, rtx, addr_space_t) =
(strictp ? strict_memory_address_addr_space_p
: memory_address_addr_space_p);
- unsigned int mode_sz = GET_MODE_SIZE (mode);
+ poly_int64 mode_sz = GET_MODE_SIZE (mode);
if (CONSTANT_ADDRESS_P (y))
- return 1;
+ return true;
/* Adjusting an offsettable address involves changing to a narrower mode.
Make sure that's OK. */
if (mode_dependent_address_p (y, as))
- return 0;
+ return false;
- enum machine_mode address_mode = GET_MODE (y);
+ machine_mode address_mode = GET_MODE (y);
if (address_mode == VOIDmode)
address_mode = targetm.addr_space.address_mode (as);
#ifdef POINTERS_EXTEND_UNSIGNED
- enum machine_mode pointer_mode = targetm.addr_space.pointer_mode (as);
+ machine_mode pointer_mode = targetm.addr_space.pointer_mode (as);
#endif
/* ??? How much offset does an offsettable BLKmode reference need?
Clearly that depends on the situation in which it's being used.
However, the current situation in which we test 0xffffffff is
less than ideal. Caveat user. */
- if (mode_sz == 0)
+ if (known_eq (mode_sz, 0))
mode_sz = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
/* If the expression contains a constant term,
if ((ycode == PLUS) && (y2 = find_constant_term_loc (&y1)))
{
- int good;
+ bool good;
y1 = *y2;
*y2 = plus_constant (address_mode, *y2, mode_sz - 1);
}
if (GET_RTX_CLASS (ycode) == RTX_AUTOINC)
- return 0;
+ return false;
/* The offset added here is chosen as the maximum offset that
any instruction could need to add when operating on something
go inside a LO_SUM here, so we do so as well. */
if (GET_CODE (y) == LO_SUM
&& mode != BLKmode
- && mode_sz <= GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT)
+ && known_le (mode_sz, GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT))
z = gen_rtx_LO_SUM (address_mode, XEXP (y, 0),
plus_constant (address_mode, XEXP (y, 1),
mode_sz - 1));
return (*addressp) (QImode, z, as);
}
-/* Return 1 if ADDR is an address-expression whose effect depends
+/* Return true if ADDR is an address-expression whose effect depends
on the mode of the memory reference it is used in.
ADDRSPACE is the address space associated with the address.
return targetm.mode_dependent_address_p (addr, addrspace);
}
-\f
-/* Return the mask of operand alternatives that are allowed for INSN.
- This mask depends only on INSN and on the current target; it does not
- depend on things like the values of operands. */
+\f
+/* Return true if boolean attribute ATTR is supported. */
+
+static bool
+have_bool_attr (bool_attr attr)
+{
+ switch (attr)
+ {
+ case BA_ENABLED:
+ return HAVE_ATTR_enabled;
+ case BA_PREFERRED_FOR_SIZE:
+ return HAVE_ATTR_enabled || HAVE_ATTR_preferred_for_size;
+ case BA_PREFERRED_FOR_SPEED:
+ return HAVE_ATTR_enabled || HAVE_ATTR_preferred_for_speed;
+ }
+ gcc_unreachable ();
+}
-alternative_mask
-get_enabled_alternatives (rtx insn)
+/* Return the value of ATTR for instruction INSN. */
+
+static bool
+get_bool_attr (rtx_insn *insn, bool_attr attr)
{
- /* Quick exit for asms and for targets that don't use the "enabled"
- attribute. */
- int code = INSN_CODE (insn);
- if (code < 0 || !HAVE_ATTR_enabled)
- return ALL_ALTERNATIVES;
+ switch (attr)
+ {
+ case BA_ENABLED:
+ return get_attr_enabled (insn);
+ case BA_PREFERRED_FOR_SIZE:
+ return get_attr_enabled (insn) && get_attr_preferred_for_size (insn);
+ case BA_PREFERRED_FOR_SPEED:
+ return get_attr_enabled (insn) && get_attr_preferred_for_speed (insn);
+ }
+ gcc_unreachable ();
+}
- /* Calling get_attr_enabled can be expensive, so cache the mask
- for speed. */
- if (this_target_recog->x_enabled_alternatives[code])
- return this_target_recog->x_enabled_alternatives[code];
+/* Like get_bool_attr_mask, but don't use the cache. */
- /* Temporarily install enough information for get_attr_enabled to assume
+static alternative_mask
+get_bool_attr_mask_uncached (rtx_insn *insn, bool_attr attr)
+{
+ /* Temporarily install enough information for get_attr_<foo> to assume
that the insn operands are already cached. As above, the attribute
mustn't depend on the values of operands, so we don't provide their
real values here. */
- rtx old_insn = recog_data.insn;
+ rtx_insn *old_insn = recog_data.insn;
int old_alternative = which_alternative;
recog_data.insn = insn;
- alternative_mask enabled = ALL_ALTERNATIVES;
- int n_alternatives = insn_data[code].n_alternatives;
+ alternative_mask mask = ALL_ALTERNATIVES;
+ int n_alternatives = insn_data[INSN_CODE (insn)].n_alternatives;
for (int i = 0; i < n_alternatives; i++)
{
which_alternative = i;
- if (!get_attr_enabled (insn))
- enabled &= ~ALTERNATIVE_BIT (i);
+ if (!get_bool_attr (insn, attr))
+ mask &= ~ALTERNATIVE_BIT (i);
}
recog_data.insn = old_insn;
which_alternative = old_alternative;
+ return mask;
+}
+
+/* Return the mask of operand alternatives that are allowed for INSN
+ by boolean attribute ATTR. This mask depends only on INSN and on
+ the current target; it does not depend on things like the values of
+ operands. */
+
+static alternative_mask
+get_bool_attr_mask (rtx_insn *insn, bool_attr attr)
+{
+ /* Quick exit for asms and for targets that don't use these attributes. */
+ int code = INSN_CODE (insn);
+ if (code < 0 || !have_bool_attr (attr))
+ return ALL_ALTERNATIVES;
+
+ /* Calling get_attr_<foo> can be expensive, so cache the mask
+ for speed. */
+ if (!this_target_recog->x_bool_attr_masks[code][attr])
+ this_target_recog->x_bool_attr_masks[code][attr]
+ = get_bool_attr_mask_uncached (insn, attr);
+ return this_target_recog->x_bool_attr_masks[code][attr];
+}
+
+/* Return the set of alternatives of INSN that are allowed by the current
+ target. */
+
+alternative_mask
+get_enabled_alternatives (rtx_insn *insn)
+{
+ return get_bool_attr_mask (insn, BA_ENABLED);
+}
+
+/* Return the set of alternatives of INSN that are allowed by the current
+ target and are preferred for the current size/speed optimization
+ choice. */
+
+alternative_mask
+get_preferred_alternatives (rtx_insn *insn)
+{
+ if (optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn)))
+ return get_bool_attr_mask (insn, BA_PREFERRED_FOR_SPEED);
+ else
+ return get_bool_attr_mask (insn, BA_PREFERRED_FOR_SIZE);
+}
- this_target_recog->x_enabled_alternatives[code] = enabled;
- return enabled;
+/* Return the set of alternatives of INSN that are allowed by the current
+ target and are preferred for the size/speed optimization choice
+ associated with BB. Passing a separate BB is useful if INSN has not
+ been emitted yet or if we are considering moving it to a different
+ block. */
+
+alternative_mask
+get_preferred_alternatives (rtx_insn *insn, basic_block bb)
+{
+ if (optimize_bb_for_speed_p (bb))
+ return get_bool_attr_mask (insn, BA_PREFERRED_FOR_SPEED);
+ else
+ return get_bool_attr_mask (insn, BA_PREFERRED_FOR_SIZE);
+}
+
+/* Assert that the cached boolean attributes for INSN are still accurate.
+ The backend is required to define these attributes in a way that only
+ depends on the current target (rather than operands, compiler phase,
+ etc.). */
+
+bool
+check_bool_attrs (rtx_insn *insn)
+{
+ int code = INSN_CODE (insn);
+ if (code >= 0)
+ for (int i = 0; i <= BA_LAST; ++i)
+ {
+ enum bool_attr attr = (enum bool_attr) i;
+ if (this_target_recog->x_bool_attr_masks[code][attr])
+ gcc_assert (this_target_recog->x_bool_attr_masks[code][attr]
+ == get_bool_attr_mask_uncached (insn, attr));
+ }
+ return true;
}
/* Like extract_insn, but save insn extracted and don't extract again, when
recog_data.insn = insn;
}
+/* Do uncached extract_insn, constrain_operands and complain about failures.
+ This should be used when extracting a pre-existing constrained instruction
+ if the caller wants to know which alternative was chosen. */
+void
+extract_constrain_insn (rtx_insn *insn)
+{
+ extract_insn (insn);
+ if (!constrain_operands (reload_completed, get_enabled_alternatives (insn)))
+ fatal_insn_not_found (insn);
+}
+
/* Do cached extract_insn, constrain_operands and complain about failures.
Used by insn_attrtab. */
void
{
extract_insn_cached (insn);
if (which_alternative == -1
- && !constrain_operands (reload_completed))
+ && !constrain_operands (reload_completed,
+ get_enabled_alternatives (insn)))
fatal_insn_not_found (insn);
}
-/* Do cached constrain_operands and complain about failures. */
+/* Do cached constrain_operands on INSN and complain about failures. */
int
-constrain_operands_cached (int strict)
+constrain_operands_cached (rtx_insn *insn, int strict)
{
if (which_alternative == -1)
- return constrain_operands (strict);
+ return constrain_operands (strict, get_enabled_alternatives (insn));
else
return 1;
}
case ADDR_VEC:
case ADDR_DIFF_VEC:
case VAR_LOCATION:
+ case DEBUG_MARKER:
return;
case SET:
case PARALLEL:
if ((GET_CODE (XVECEXP (body, 0, 0)) == SET
&& GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS)
- || GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ || GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS
+ || GET_CODE (XVECEXP (body, 0, 0)) == ASM_INPUT)
goto asm_insn;
else
goto normal_insn;
gcc_assert (recog_data.n_alternatives <= MAX_RECOG_ALTERNATIVES);
- recog_data.enabled_alternatives = get_enabled_alternatives (insn);
-
recog_data.insn = NULL;
which_alternative = -1;
}
-/* Fill in OP_ALT_BASE for an instruction that has N_OPERANDS operands,
- N_ALTERNATIVES alternatives and constraint strings CONSTRAINTS.
- OP_ALT_BASE has N_ALTERNATIVES * N_OPERANDS entries and CONSTRAINTS
- has N_OPERANDS entries. */
+/* Fill in OP_ALT_BASE for an instruction that has N_OPERANDS
+ operands, N_ALTERNATIVES alternatives and constraint strings
+ CONSTRAINTS. OP_ALT_BASE has N_ALTERNATIVES * N_OPERANDS entries
+ and CONSTRAINTS has N_OPERANDS entries. OPLOC should be passed in
+ if the insn is an asm statement and preprocessing should take the
+ asm operands into account, e.g. to determine whether they could be
+ addresses in constraints that require addresses; it should then
+ point to an array of pointers to each operand. */
void
preprocess_constraints (int n_operands, int n_alternatives,
const char **constraints,
- operand_alternative *op_alt_base)
+ operand_alternative *op_alt_base,
+ rtx **oploc)
{
for (int i = 0; i < n_operands; i++)
{
break;
case CT_MEMORY:
+ case CT_SPECIAL_MEMORY:
+ case CT_RELAXED_MEMORY:
op_alt[i].memory_ok = 1;
break;
case CT_ADDRESS:
+ if (oploc && !address_operand (*oploc[i], VOIDmode))
+ break;
+
op_alt[i].is_address = 1;
op_alt[i].cl
= (reg_class_subunion
instruction ICODE. */
const operand_alternative *
-preprocess_insn_constraints (int icode)
+preprocess_insn_constraints (unsigned int icode)
{
- gcc_checking_assert (IN_RANGE (icode, 0, LAST_INSN_CODE));
+ gcc_checking_assert (IN_RANGE (icode, 0, NUM_INSN_CODES - 1));
if (this_target_recog->x_op_alt[icode])
return this_target_recog->x_op_alt[icode];
for (int i = 0; i < n_operands; ++i)
constraints[i] = insn_data[icode].operand[i].constraint;
- preprocess_constraints (n_operands, n_alternatives, constraints, op_alt);
+ preprocess_constraints (n_operands, n_alternatives, constraints, op_alt,
+ NULL);
this_target_recog->x_op_alt[icode] = op_alt;
return op_alt;
The collected data is stored in recog_op_alt. */
void
-preprocess_constraints (rtx insn)
+preprocess_constraints (rtx_insn *insn)
{
int icode = INSN_CODE (insn);
if (icode >= 0)
int n_entries = n_operands * n_alternatives;
memset (asm_op_alt, 0, n_entries * sizeof (operand_alternative));
preprocess_constraints (n_operands, n_alternatives,
- recog_data.constraints, asm_op_alt);
+ recog_data.constraints, asm_op_alt,
+ NULL);
recog_op_alt = asm_op_alt;
}
}
/* Check the operands of an insn against the insn's operand constraints
- and return 1 if they are valid.
+ and return 1 if they match any of the alternatives in ALTERNATIVES.
+
The information about the insn's operands, constraints, operand modes
etc. is obtained from the global variables set up by extract_insn.
};
int
-constrain_operands (int strict)
+constrain_operands (int strict, alternative_mask alternatives)
{
const char *constraints[MAX_RECOG_OPERANDS];
int matching_operands[MAX_RECOG_OPERANDS];
return 1;
for (c = 0; c < recog_data.n_operands; c++)
- {
- constraints[c] = recog_data.constraints[c];
- matching_operands[c] = -1;
- }
+ constraints[c] = recog_data.constraints[c];
do
{
int lose = 0;
funny_match_index = 0;
- if (!TEST_BIT (recog_data.enabled_alternatives, which_alternative))
+ if (!TEST_BIT (alternatives, which_alternative))
{
int i;
continue;
}
+ for (opno = 0; opno < recog_data.n_operands; opno++)
+ matching_operands[opno] = -1;
+
for (opno = 0; opno < recog_data.n_operands; opno++)
{
rtx op = recog_data.operand[opno];
- enum machine_mode mode = GET_MODE (op);
+ machine_mode mode = GET_MODE (op);
const char *p = constraints[opno];
int offset = 0;
int win = 0;
/* A unary operator may be accepted by the predicate, but it
is irrelevant for matching constraints. */
- if (UNARY_P (op))
+ /* For special_memory_operand, there could be a memory operand inside,
+ and it would cause a mismatch for constraint_satisfied_p. */
+ if (UNARY_P (op) && op == extract_mem_from_operand (op))
op = XEXP (op, 0);
if (GET_CODE (op) == SUBREG)
/* p is used for address_operands. When we are called by
gen_reload, no one will have checked that the address is
strictly valid, i.e., that all pseudos requiring hard regs
- have gotten them. */
- if (strict <= 0
- || (strict_memory_address_p (recog_data.operand_mode[opno],
- op)))
+ have gotten them. We also want to make sure we have a
+ valid mode. */
+ if ((GET_MODE (op) == VOIDmode
+ || SCALAR_INT_MODE_P (GET_MODE (op)))
+ && (strict <= 0
+ || (strict_memory_address_p
+ (recog_data.operand_mode[opno], op))))
win = 1;
break;
/* Every memory operand can be reloaded to fit. */
&& ((strict < 0 && MEM_P (op))
/* Before reload, accept what reload can turn
- into mem. */
+ into a mem. */
|| (strict < 0 && CONSTANT_P (op))
+ /* Before reload, accept a pseudo or hard register,
+ since LRA can turn it into a mem. */
+ || (strict < 0 && targetm.lra_p () && REG_P (op))
/* During reload, accept a pseudo */
|| (reload_in_progress && REG_P (op)
&& REGNO (op) >= FIRST_PSEUDO_REGISTER)))
= recog_data.operand[funny_match[funny_match_index].this_op];
}
-#ifdef AUTO_INC_DEC
/* For operands without < or > constraints reject side-effects. */
- if (recog_data.is_asm)
+ if (AUTO_INC_DEC && recog_data.is_asm)
{
for (opno = 0; opno < recog_data.n_operands; opno++)
if (MEM_P (recog_data.operand[opno]))
break;
}
}
-#endif
+
return 1;
}
}
/* If we are about to reject this, but we are not to test strictly,
try a very loose test. Only return failure if it fails also. */
if (strict == 0)
- return constrain_operands (-1);
+ return constrain_operands (-1, alternatives);
else
return 0;
}
bool
reg_fits_class_p (const_rtx operand, reg_class_t cl, int offset,
- enum machine_mode mode)
+ machine_mode mode)
{
unsigned int regno = REGNO (operand);
split_all_insns_noflow. Return last insn in the sequence if successful,
or NULL if unsuccessful. */
-static rtx
+static rtx_insn *
split_insn (rtx_insn *insn)
{
/* Split insns here to get max fine-grain parallelism. */
rtx insn_set, last_set, note;
if (last == insn)
- return NULL_RTX;
+ return NULL;
/* If the original instruction was a single set that was known to be
equivalent to a constant, see if we can say the same about the last
void
split_all_insns (void)
{
- sbitmap blocks;
bool changed;
+ bool need_cfg_cleanup = false;
basic_block bb;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap blocks (last_basic_block_for_fn (cfun));
bitmap_clear (blocks);
changed = false;
CODE_LABELS and short-out basic blocks. */
next = NEXT_INSN (insn);
finish = (insn == BB_END (bb));
+
+ /* If INSN has a REG_EH_REGION note and we split INSN, the
+ resulting split may not have/need REG_EH_REGION notes.
+
+ If that happens and INSN was the last reference to the
+ given EH region, then the EH region will become unreachable.
+ We cannot leave the unreachable blocks in the CFG as that
+ will trigger a checking failure.
+
+ So track if INSN has a REG_EH_REGION note. If so and we
+ split INSN, then trigger a CFG cleanup. */
+ rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
if (INSN_P (insn))
{
rtx set = single_set (insn);
nops then anyways. */
if (reload_completed)
delete_insn_and_edges (insn);
+ if (note)
+ need_cfg_cleanup = true;
}
else
{
{
bitmap_set_bit (blocks, bb->index);
changed = true;
+ if (note)
+ need_cfg_cleanup = true;
}
}
}
default_rtl_profile ();
if (changed)
- find_many_sub_basic_blocks (blocks);
-
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ {
+ find_many_sub_basic_blocks (blocks);
+
+ /* Splitting could drop a REG_EH_REGION if it potentially
+ trapped in its original form, but does not in its split
+ form. Consider a FLOAT_TRUNCATE which splits into a memory
+ store/load pair and -fnon-call-exceptions. */
+ if (need_cfg_cleanup)
+ cleanup_cfg (0);
+ }
- sbitmap_free (blocks);
+ checking_verify_flow_info ();
}
/* Same as split_all_insns, but do not expect CFG to be available.
return 0;
}
\f
-#ifdef HAVE_peephole2
struct peep2_insn_data
{
- rtx insn;
+ rtx_insn *insn;
regset live_before;
};
/* The number of instructions available to match a peep2. */
int peep2_current_count;
-/* A non-insn marker indicating the last insn of the block.
- The live_before regset for this element is correct, indicating
- DF_LIVE_OUT for the block. */
-#define PEEP2_EOB pc_rtx
+/* A marker indicating the last insn of the block. The live_before regset
+ for this element is correct, indicating DF_LIVE_OUT for the block. */
+#define PEEP2_EOB invalid_insn_rtx
/* Wrap N to fit into the peep2_insn_data buffer. */
does not exist. Used by the recognizer to find the next insn to match
in a multi-insn pattern. */
-rtx
+rtx_insn *
peep2_next_insn (int n)
{
gcc_assert (n <= peep2_current_count);
int
peep2_reg_dead_p (int ofs, rtx reg)
{
- int regno, n;
-
gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1);
ofs = peep2_buf_position (peep2_current + ofs);
gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX);
- regno = REGNO (reg);
- n = hard_regno_nregs[regno][GET_MODE (reg)];
- while (--n >= 0)
- if (REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno + n))
+ unsigned int end_regno = END_REGNO (reg);
+ for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
+ if (REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno))
return 0;
return 1;
}
rtx
peep2_find_free_register (int from, int to, const char *class_str,
- enum machine_mode mode, HARD_REG_SET *reg_set)
+ machine_mode mode, HARD_REG_SET *reg_set)
{
enum reg_class cl;
HARD_REG_SET live;
#endif
/* Can it support the mode we need? */
- if (! HARD_REGNO_MODE_OK (regno, mode))
+ if (!targetm.hard_regno_mode_ok (regno, mode))
continue;
success = 1;
- for (j = 0; success && j < hard_regno_nregs[regno][mode]; j++)
+ for (j = 0; success && j < hard_regno_nregs (regno, mode); j++)
{
/* Don't allocate fixed registers. */
if (fixed_regs[regno + j])
break;
}
/* And that we don't create an extra save/restore. */
- if (! call_used_regs[regno + j] && ! df_regs_ever_live_p (regno + j))
+ if (! crtl->abi->clobbers_full_reg_p (regno + j)
+ && ! df_regs_ever_live_p (regno + j))
{
success = 0;
break;
/* Indicate that all slots except the last holds invalid data. */
for (i = 0; i < MAX_INSNS_PER_PEEP2; ++i)
- peep2_insn_data[i].insn = NULL_RTX;
+ peep2_insn_data[i].insn = NULL;
peep2_current_count = 0;
/* Indicate that the last slot contains live_after data. */
COPY_REG_SET (peep2_insn_data[MAX_INSNS_PER_PEEP2].live_before, live);
}
+/* Copies frame-related info of an insn (OLD_INSN) to the single
+ insn (NEW_INSN) that was obtained by splitting OLD_INSN. */
+
+void
+copy_frame_info_to_split_insn (rtx_insn *old_insn, rtx_insn *new_insn)
+{
+ bool any_note = false;
+ rtx note;
+
+ if (!RTX_FRAME_RELATED_P (old_insn))
+ return;
+
+ RTX_FRAME_RELATED_P (new_insn) = 1;
+
+ /* Allow the backend to fill in a note during the split. */
+ for (note = REG_NOTES (new_insn); note ; note = XEXP (note, 1))
+ switch (REG_NOTE_KIND (note))
+ {
+ case REG_FRAME_RELATED_EXPR:
+ case REG_CFA_DEF_CFA:
+ case REG_CFA_ADJUST_CFA:
+ case REG_CFA_OFFSET:
+ case REG_CFA_REGISTER:
+ case REG_CFA_EXPRESSION:
+ case REG_CFA_RESTORE:
+ case REG_CFA_SET_VDRAP:
+ any_note = true;
+ break;
+ default:
+ break;
+ }
+
+ /* If the backend didn't supply a note, copy one over. */
+ if (!any_note)
+ for (note = REG_NOTES (old_insn); note ; note = XEXP (note, 1))
+ switch (REG_NOTE_KIND (note))
+ {
+ case REG_FRAME_RELATED_EXPR:
+ case REG_CFA_DEF_CFA:
+ case REG_CFA_ADJUST_CFA:
+ case REG_CFA_OFFSET:
+ case REG_CFA_REGISTER:
+ case REG_CFA_EXPRESSION:
+ case REG_CFA_RESTORE:
+ case REG_CFA_SET_VDRAP:
+ add_reg_note (new_insn, REG_NOTE_KIND (note), XEXP (note, 0));
+ any_note = true;
+ break;
+ default:
+ break;
+ }
+
+ /* If there still isn't a note, make sure the unwind info sees the
+ same expression as before the split. */
+ if (!any_note)
+ {
+ rtx old_set, new_set;
+
+ /* The old insn had better have been simple, or annotated. */
+ old_set = single_set (old_insn);
+ gcc_assert (old_set != NULL);
+
+ new_set = single_set (new_insn);
+ if (!new_set || !rtx_equal_p (new_set, old_set))
+ add_reg_note (new_insn, REG_FRAME_RELATED_EXPR, old_set);
+ }
+
+ /* Copy prologue/epilogue status. This is required in order to keep
+ proper placement of EPILOGUE_BEG and the DW_CFA_remember_state. */
+ maybe_copy_prologue_epilogue_insn (old_insn, new_insn);
+}
+
/* While scanning basic block BB, we found a match of length MATCH_LEN,
starting at INSN. Perform the replacement, removing the old insns and
replacing them with ATTEMPT. Returns the last insn emitted, or NULL
if the replacement is rejected. */
static rtx_insn *
-peep2_attempt (basic_block bb, rtx uncast_insn, int match_len, rtx_insn *attempt)
+peep2_attempt (basic_block bb, rtx_insn *insn, int match_len, rtx_insn *attempt)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
int i;
rtx_insn *last, *before_try, *x;
rtx eh_note, as_note;
- rtx old_insn;
+ rtx_insn *old_insn;
rtx_insn *new_insn;
bool was_call = false;
old_insn = peep2_insn_data[peep2_current].insn;
if (RTX_FRAME_RELATED_P (old_insn))
{
- bool any_note = false;
- rtx note;
-
if (match_len != 0)
return NULL;
return NULL;
/* We have a 1-1 replacement. Copy over any frame-related info. */
- RTX_FRAME_RELATED_P (new_insn) = 1;
-
- /* Allow the backend to fill in a note during the split. */
- for (note = REG_NOTES (new_insn); note ; note = XEXP (note, 1))
- switch (REG_NOTE_KIND (note))
- {
- case REG_FRAME_RELATED_EXPR:
- case REG_CFA_DEF_CFA:
- case REG_CFA_ADJUST_CFA:
- case REG_CFA_OFFSET:
- case REG_CFA_REGISTER:
- case REG_CFA_EXPRESSION:
- case REG_CFA_RESTORE:
- case REG_CFA_SET_VDRAP:
- any_note = true;
- break;
- default:
- break;
- }
-
- /* If the backend didn't supply a note, copy one over. */
- if (!any_note)
- for (note = REG_NOTES (old_insn); note ; note = XEXP (note, 1))
- switch (REG_NOTE_KIND (note))
- {
- case REG_FRAME_RELATED_EXPR:
- case REG_CFA_DEF_CFA:
- case REG_CFA_ADJUST_CFA:
- case REG_CFA_OFFSET:
- case REG_CFA_REGISTER:
- case REG_CFA_EXPRESSION:
- case REG_CFA_RESTORE:
- case REG_CFA_SET_VDRAP:
- add_reg_note (new_insn, REG_NOTE_KIND (note), XEXP (note, 0));
- any_note = true;
- break;
- default:
- break;
- }
-
- /* If there still isn't a note, make sure the unwind info sees the
- same expression as before the split. */
- if (!any_note)
- {
- rtx old_set, new_set;
-
- /* The old insn had better have been simple, or annotated. */
- old_set = single_set (old_insn);
- gcc_assert (old_set != NULL);
-
- new_set = single_set (new_insn);
- if (!new_set || !rtx_equal_p (new_set, old_set))
- add_reg_note (new_insn, REG_FRAME_RELATED_EXPR, old_set);
- }
-
- /* Copy prologue/epilogue status. This is required in order to keep
- proper placement of EPILOGUE_BEG and the DW_CFA_remember_state. */
- maybe_copy_prologue_epilogue_insn (old_insn, new_insn);
+ copy_frame_info_to_split_insn (old_insn, new_insn);
}
/* If we are splitting a CALL_INSN, look for the CALL_INSN
case REG_NORETURN:
case REG_SETJMP:
case REG_TM:
+ case REG_CALL_NOCF_CHECK:
add_reg_note (new_insn, REG_NOTE_KIND (note),
XEXP (note, 0));
break;
eh_note = find_reg_note (peep2_insn_data[i].insn, REG_EH_REGION, NULL_RTX);
/* Replace the old sequence with the new. */
- rtx_insn *peepinsn = as_a <rtx_insn *> (peep2_insn_data[i].insn);
+ rtx_insn *peepinsn = peep2_insn_data[i].insn;
last = emit_insn_after_setloc (attempt,
peep2_insn_data[i].insn,
INSN_LOCATION (peepinsn));
+ if (JUMP_P (peepinsn) && JUMP_P (last))
+ CROSSING_JUMP_P (last) = CROSSING_JUMP_P (peepinsn);
before_try = PREV_INSN (insn);
delete_insn_chain (insn, peep2_insn_data[i].insn, false);
flags);
nehe->probability = eh_edge->probability;
- nfte->probability
- = REG_BR_PROB_BASE - nehe->probability;
+ nfte->probability = nehe->probability.invert ();
peep2_do_cleanup_cfg |= purge_dead_edges (nfte->dest);
bb = nfte->src;
/* Re-insert the ARGS_SIZE notes. */
if (as_note)
- fixup_args_size_notes (before_try, last, INTVAL (XEXP (as_note, 0)));
+ fixup_args_size_notes (before_try, last, get_args_size (as_note));
+
+ /* Scan the new insns for embedded side effects and add appropriate
+ REG_INC notes. */
+ if (AUTO_INC_DEC)
+ for (x = last; x != before_try; x = PREV_INSN (x))
+ if (NONDEBUG_INSN_P (x))
+ add_auto_inc_notes (x, PATTERN (x));
/* If we generated a jump instruction, it won't have
JUMP_LABEL set. Recompute after we're done. */
add more instructions to the buffer. */
static bool
-peep2_fill_buffer (basic_block bb, rtx insn, regset live)
+peep2_fill_buffer (basic_block bb, rtx_insn *insn, regset live)
{
int pos;
COPY_REG_SET (peep2_insn_data[pos].live_before, live);
peep2_current_count++;
- df_simulate_one_insn_forwards (bb, as_a <rtx_insn *> (insn), live);
+ df_simulate_one_insn_forwards (bb, insn, live);
return true;
}
insn = BB_HEAD (bb);
for (;;)
{
- rtx_insn *attempt;
- rtx head;
+ rtx_insn *attempt, *head;
int match_len;
if (!past_end && !NONDEBUG_INSN_P (insn))
/* Match the peephole. */
head = peep2_insn_data[peep2_current].insn;
- attempt = safe_as_a <rtx_insn *> (
- peephole2_insns (PATTERN (head), head, &match_len));
+ attempt = peephole2_insns (PATTERN (head), head, &match_len);
if (attempt != NULL)
{
rtx_insn *last = peep2_attempt (bb, head, match_len, attempt);
if (peep2_do_cleanup_cfg)
cleanup_cfg (CLEANUP_CFG_CHANGED);
}
-#endif /* HAVE_peephole2 */
/* Common predicates for use with define_bypass. */
-/* True if the dependency between OUT_INSN and IN_INSN is on the store
- data not the address operand(s) of the store. IN_INSN and OUT_INSN
- must be either a single_set or a PARALLEL with SETs inside. */
+/* Helper function for store_data_bypass_p, handle just a single SET
+ IN_SET. */
-int
-store_data_bypass_p (rtx out_insn, rtx in_insn)
+static bool
+store_data_bypass_p_1 (rtx_insn *out_insn, rtx in_set)
{
- rtx out_set, in_set;
- rtx out_pat, in_pat;
- rtx out_exp, in_exp;
- int i, j;
+ if (!MEM_P (SET_DEST (in_set)))
+ return false;
- in_set = single_set (in_insn);
- if (in_set)
+ rtx out_set = single_set (out_insn);
+ if (out_set)
+ return !reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set));
+
+ rtx out_pat = PATTERN (out_insn);
+ if (GET_CODE (out_pat) != PARALLEL)
+ return false;
+
+ for (int i = 0; i < XVECLEN (out_pat, 0); i++)
{
- if (!MEM_P (SET_DEST (in_set)))
- return false;
+ rtx out_exp = XVECEXP (out_pat, 0, i);
- out_set = single_set (out_insn);
- if (out_set)
- {
- if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set)))
- return false;
- }
- else
- {
- out_pat = PATTERN (out_insn);
+ if (GET_CODE (out_exp) == CLOBBER || GET_CODE (out_exp) == USE)
+ continue;
- if (GET_CODE (out_pat) != PARALLEL)
- return false;
+ gcc_assert (GET_CODE (out_exp) == SET);
- for (i = 0; i < XVECLEN (out_pat, 0); i++)
- {
- out_exp = XVECEXP (out_pat, 0, i);
+ if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_set)))
+ return false;
+ }
- if (GET_CODE (out_exp) == CLOBBER)
- continue;
+ return true;
+}
- gcc_assert (GET_CODE (out_exp) == SET);
+/* True if the dependency between OUT_INSN and IN_INSN is on the store
+ data not the address operand(s) of the store. IN_INSN and OUT_INSN
+ must be either a single_set or a PARALLEL with SETs inside. */
- if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_set)))
- return false;
- }
- }
- }
- else
- {
- in_pat = PATTERN (in_insn);
- gcc_assert (GET_CODE (in_pat) == PARALLEL);
+int
+store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
+{
+ rtx in_set = single_set (in_insn);
+ if (in_set)
+ return store_data_bypass_p_1 (out_insn, in_set);
- for (i = 0; i < XVECLEN (in_pat, 0); i++)
- {
- in_exp = XVECEXP (in_pat, 0, i);
+ rtx in_pat = PATTERN (in_insn);
+ if (GET_CODE (in_pat) != PARALLEL)
+ return false;
- if (GET_CODE (in_exp) == CLOBBER)
- continue;
+ for (int i = 0; i < XVECLEN (in_pat, 0); i++)
+ {
+ rtx in_exp = XVECEXP (in_pat, 0, i);
- gcc_assert (GET_CODE (in_exp) == SET);
+ if (GET_CODE (in_exp) == CLOBBER || GET_CODE (in_exp) == USE)
+ continue;
- if (!MEM_P (SET_DEST (in_exp)))
- return false;
+ gcc_assert (GET_CODE (in_exp) == SET);
- out_set = single_set (out_insn);
- if (out_set)
- {
- if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_exp)))
- return false;
- }
- else
- {
- out_pat = PATTERN (out_insn);
- gcc_assert (GET_CODE (out_pat) == PARALLEL);
-
- for (j = 0; j < XVECLEN (out_pat, 0); j++)
- {
- out_exp = XVECEXP (out_pat, 0, j);
-
- if (GET_CODE (out_exp) == CLOBBER)
- continue;
-
- gcc_assert (GET_CODE (out_exp) == SET);
-
- if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_exp)))
- return false;
- }
- }
- }
+ if (!store_data_bypass_p_1 (out_insn, in_exp))
+ return false;
}
return true;
of insn categorization may be any JUMP or CALL insn. */
int
-if_test_bypass_p (rtx out_insn, rtx in_insn)
+if_test_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
rtx out_set, in_set;
static unsigned int
rest_of_handle_peephole2 (void)
{
-#ifdef HAVE_peephole2
- peephole2_optimize ();
-#endif
+ if (HAVE_peephole2)
+ peephole2_optimize ();
+
return 0;
}
OPTGROUP_NONE, /* optinfo_flags */
TV_NONE, /* tv_id */
0, /* properties_required */
- 0, /* properties_provided */
+ PROP_rtl_split_insns, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
return new pass_split_all_insns (ctxt);
}
-static unsigned int
-rest_of_handle_split_after_reload (void)
-{
- /* If optimizing, then go ahead and split insns now. */
-#ifndef STACK_REGS
- if (optimize > 0)
-#endif
- split_all_insns ();
- return 0;
-}
-
namespace {
const pass_data pass_data_split_after_reload =
{}
/* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ /* If optimizing, then go ahead and split insns now. */
+ return optimize > 0;
+ }
+
virtual unsigned int execute (function *)
{
- return rest_of_handle_split_after_reload ();
+ split_all_insns ();
+ return 0;
}
}; // class pass_split_after_reload
return new pass_split_after_reload (ctxt);
}
+static bool
+enable_split_before_sched2 (void)
+{
+#ifdef INSN_SCHEDULING
+ return optimize > 0 && flag_schedule_insns_after_reload;
+#else
+ return false;
+#endif
+}
+
namespace {
-const pass_data pass_data_split_before_regstack =
+const pass_data pass_data_split_before_sched2 =
{
RTL_PASS, /* type */
"split3", /* name */
0, /* todo_flags_finish */
};
-class pass_split_before_regstack : public rtl_opt_pass
+class pass_split_before_sched2 : public rtl_opt_pass
{
public:
- pass_split_before_regstack (gcc::context *ctxt)
- : rtl_opt_pass (pass_data_split_before_regstack, ctxt)
+ pass_split_before_sched2 (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_split_before_sched2, ctxt)
{}
/* opt_pass methods: */
- virtual bool gate (function *);
+ virtual bool gate (function *)
+ {
+ return enable_split_before_sched2 ();
+ }
+
virtual unsigned int execute (function *)
{
split_all_insns ();
return 0;
}
-}; // class pass_split_before_regstack
-
-bool
-pass_split_before_regstack::gate (function *)
-{
-#if HAVE_ATTR_length && defined (STACK_REGS)
- /* If flow2 creates new instructions which need splitting
- and scheduling after reload is not done, they might not be
- split until final which doesn't allow splitting
- if HAVE_ATTR_length. */
-# ifdef INSN_SCHEDULING
- return (optimize && !flag_schedule_insns_after_reload);
-# else
- return (optimize);
-# endif
-#else
- return 0;
-#endif
-}
+}; // class pass_split_before_sched2
} // anon namespace
rtl_opt_pass *
-make_pass_split_before_regstack (gcc::context *ctxt)
-{
- return new pass_split_before_regstack (ctxt);
-}
-
-static unsigned int
-rest_of_handle_split_before_sched2 (void)
+make_pass_split_before_sched2 (gcc::context *ctxt)
{
-#ifdef INSN_SCHEDULING
- split_all_insns ();
-#endif
- return 0;
+ return new pass_split_before_sched2 (ctxt);
}
namespace {
-const pass_data pass_data_split_before_sched2 =
+const pass_data pass_data_split_before_regstack =
{
RTL_PASS, /* type */
"split4", /* name */
0, /* todo_flags_finish */
};
-class pass_split_before_sched2 : public rtl_opt_pass
+class pass_split_before_regstack : public rtl_opt_pass
{
public:
- pass_split_before_sched2 (gcc::context *ctxt)
- : rtl_opt_pass (pass_data_split_before_sched2, ctxt)
+ pass_split_before_regstack (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_split_before_regstack, ctxt)
{}
/* opt_pass methods: */
- virtual bool gate (function *)
- {
-#ifdef INSN_SCHEDULING
- return optimize > 0 && flag_schedule_insns_after_reload;
-#else
- return false;
-#endif
- }
-
+ virtual bool gate (function *);
virtual unsigned int execute (function *)
{
- return rest_of_handle_split_before_sched2 ();
+ split_all_insns ();
+ return 0;
}
-}; // class pass_split_before_sched2
+}; // class pass_split_before_regstack
+
+bool
+pass_split_before_regstack::gate (function *)
+{
+#if HAVE_ATTR_length && defined (STACK_REGS)
+ /* If flow2 creates new instructions which need splitting
+ and scheduling after reload is not done, they might not be
+ split until final which doesn't allow splitting
+ if HAVE_ATTR_length. Selective scheduling can result in
+ further instructions that need splitting. */
+#ifdef INSN_SCHEDULING
+ return !enable_split_before_sched2 () || flag_selective_scheduling2;
+#else
+ return !enable_split_before_sched2 ();
+#endif
+#else
+ return false;
+#endif
+}
} // anon namespace
rtl_opt_pass *
-make_pass_split_before_sched2 (gcc::context *ctxt)
+make_pass_split_before_regstack (gcc::context *ctxt)
{
- return new pass_split_before_sched2 (ctxt);
+ return new pass_split_before_regstack (ctxt);
}
namespace {
this_target_recog->x_initialized = true;
return;
}
- memset (this_target_recog->x_enabled_alternatives, 0,
- sizeof (this_target_recog->x_enabled_alternatives));
- for (int i = 0; i < LAST_INSN_CODE; ++i)
+ memset (this_target_recog->x_bool_attr_masks, 0,
+ sizeof (this_target_recog->x_bool_attr_masks));
+ for (unsigned int i = 0; i < NUM_INSN_CODES; ++i)
if (this_target_recog->x_op_alt[i])
{
free (this_target_recog->x_op_alt[i]);