/* Output routines for Motorola MCore processor
- Copyright (C) 1993-2015 Free Software Foundation, Inc.
+ Copyright (C) 1993-2020 Free Software Foundation, Inc.
This file is part of GCC.
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
+#define IN_TARGET_CODE 1
+
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
+#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
+#include "attribs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "expr.h"
#include "cfgrtl.h"
#include "builtins.h"
+#include "regs.h"
/* This file should be included last. */
#include "target-def.h"
static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char * output_inline_const (machine_mode, rtx *);
static void layout_mcore_frame (struct mcore_frame *);
-static void mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
+static void mcore_setup_incoming_varargs (cumulative_args_t,
+ const function_arg_info &,
+ int *, int);
static cond_type is_cond_candidate (rtx);
-static rtx_insn *emit_new_cond_insn (rtx, int);
+static rtx_insn *emit_new_cond_insn (rtx_insn *, int);
static rtx_insn *conditionalize_block (rtx_insn *);
static void conditionalize_optimization (void);
static void mcore_reorg (void);
unsigned int, tree);
#endif
static void mcore_print_operand (FILE *, rtx, int);
-static void mcore_print_operand_address (FILE *, rtx);
+static void mcore_print_operand_address (FILE *, machine_mode, rtx);
static bool mcore_print_operand_punct_valid_p (unsigned char code);
static void mcore_unique_section (tree, int);
static void mcore_encode_section_info (tree, rtx, int);
static void mcore_external_libcall (rtx);
static bool mcore_return_in_memory (const_tree, const_tree);
static int mcore_arg_partial_bytes (cumulative_args_t,
- machine_mode,
- tree, bool);
+ const function_arg_info &);
static rtx mcore_function_arg (cumulative_args_t,
- machine_mode,
- const_tree, bool);
+ const function_arg_info &);
static void mcore_function_arg_advance (cumulative_args_t,
- machine_mode,
- const_tree, bool);
+ const function_arg_info &);
static unsigned int mcore_function_arg_boundary (machine_mode,
const_tree);
static void mcore_asm_trampoline_template (FILE *);
static bool mcore_legitimate_constant_p (machine_mode, rtx);
static bool mcore_legitimate_address_p (machine_mode, rtx, bool,
addr_space_t);
+static bool mcore_hard_regno_mode_ok (unsigned int, machine_mode);
+static bool mcore_modes_tieable_p (machine_mode, machine_mode);
\f
/* MCore specific attributes. */
static const struct attribute_spec mcore_attribute_table[] =
{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
- affects_type_identity } */
- { "dllexport", 0, 0, true, false, false, NULL, false },
- { "dllimport", 0, 0, true, false, false, NULL, false },
- { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute,
- false },
- { NULL, 0, 0, false, false, false, NULL, false }
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
+ affects_type_identity, handler, exclude } */
+ { "dllexport", 0, 0, true, false, false, false, NULL, NULL },
+ { "dllimport", 0, 0, true, false, false, false, NULL, NULL },
+ { "naked", 0, 0, true, false, false, false,
+ mcore_handle_naked_attribute, NULL },
+ { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
/* Initialize the GCC target structure. */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p
+#undef TARGET_LRA_P
+#define TARGET_LRA_P hook_bool_void_false
+
#undef TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
+#undef TARGET_HARD_REGNO_MODE_OK
+#define TARGET_HARD_REGNO_MODE_OK mcore_hard_regno_mode_ok
+
+#undef TARGET_MODES_TIEABLE_P
+#define TARGET_MODES_TIEABLE_P mcore_modes_tieable_p
+
+#undef TARGET_CONSTANT_ALIGNMENT
+#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
+
+#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
+#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Adjust the stack and return the number of bytes taken to do it. */
for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
{
- if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
+ if (df_regs_ever_live_p (reg) && !call_used_or_fixed_reg_p (reg))
{
(*count)++;
live_regs_mask |= (1 << reg);
/* Print the operand address in x to the stream. */
static void
-mcore_print_operand_address (FILE * stream, rtx x)
+mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
{
switch (GET_CODE (x))
{
break;
case MEM:
mcore_print_operand_address
- (stream, XEXP (adjust_address (x, SImode, 4), 0));
+ (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
break;
default:
gcc_unreachable ();
fputs (reg_names[REGNO (x)], (stream));
break;
case MEM:
- output_address (XEXP (x, 0));
+ output_address (GET_MODE (x), XEXP (x, 0));
break;
default:
output_addr_const (stream, x);
case EQ: /* Use inverted condition, cmpne. */
code = NE;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case NE: /* Use normal condition, cmpne. */
if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
case LE: /* Use inverted condition, reversed cmplt. */
code = GT;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case GT: /* Use normal condition, reversed cmplt. */
if (GET_CODE (op1) == CONST_INT)
case GE: /* Use inverted condition, cmplt. */
code = LT;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case LT: /* Use normal condition, cmplt. */
if (GET_CODE (op1) == CONST_INT &&
gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
code = LEU;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case LEU: /* Use normal condition, reversed cmphs. */
if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
case LTU: /* Use inverted condition, cmphs. */
code = GEU;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case GEU: /* Use normal condition, cmphs. */
if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
else
switch (GET_MODE (src)) /* r-m */
{
- case SImode:
+ case E_SImode:
return "ldw\t%0,%1";
- case HImode:
+ case E_HImode:
return "ld.h\t%0,%1";
- case QImode:
+ case E_QImode:
return "ld.b\t%0,%1";
default:
gcc_unreachable ();
else if (GET_CODE (dst) == MEM) /* m-r */
switch (GET_MODE (dst))
{
- case SImode:
+ case E_SImode:
return "stw\t%1,%0";
- case HImode:
+ case E_HImode:
return "st.h\t%1,%0";
- case QImode:
+ case E_QImode:
return "st.b\t%1,%0";
default:
gcc_unreachable ();
static void
mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
- machine_mode mode, tree type,
+ const function_arg_info &arg,
int * ptr_pretend_size ATTRIBUTE_UNUSED,
int second_time ATTRIBUTE_UNUSED)
{
/* We need to know how many argument registers are used before
the varargs start, so that we can push the remaining argument
registers during the prologue. */
- number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
+ number_of_regs_before_varargs
+ = *args_so_far + mcore_num_arg_regs (arg.mode, arg.type);
/* There is a bug somewhere in the arg handling code.
Until I can find it this workaround always pushes the
new one. Return the new insn if emitted. */
static rtx_insn *
-emit_new_cond_insn (rtx insn, int cond)
+emit_new_cond_insn (rtx_insn *insn, int cond)
{
rtx c_insn = 0;
rtx pat, dst, src;
{
int size;
- if (targetm.calls.must_pass_in_stack (mode, type))
+ function_arg_info arg (const_cast<tree> (type), mode, /*named=*/true);
+ if (targetm.calls.must_pass_in_stack (arg))
return 0;
if (type && mode == BLKmode)
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
- MODE is the argument's machine mode.
- TYPE is the data type of the argument (as a tree).
- This is null for libcalls where that information may
- not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
the preceding args and about the function being called.
- NAMED is nonzero if this argument is a named parameter
- (otherwise it is an extra parameter matching an ellipsis).
+ ARG is a description of the argument.
On MCore the first args are normally in registers
and the rest are pushed. Any arg that starts within the first
its data type forbids. */
/* Implement TARGET_FUNCTION_ARG.  Return the register in which
   argument ARG is passed, or NULL_RTX (0) to pass it on the stack.
   CUM counts the argument registers consumed by earlier parameters.

   On MCore the first NPARM_REGS word-aligned argument slots live in
   registers starting at FIRST_PARM_REG; anything beyond them, any
   unnamed (variadic) argument, and anything the generic code says must
   live in memory goes on the stack.  */
static rtx
mcore_function_arg (cumulative_args_t cum, const function_arg_info &arg)
{
  int arg_reg;

  /* Variadic arguments and the end-of-arguments marker (VOIDmode)
     never go in registers.  */
  if (!arg.named || arg.end_marker_p ())
    return 0;

  if (targetm.calls.must_pass_in_stack (arg))
    return 0;

  /* ROUND_REG aligns the register index for doubleword arguments.  */
  arg_reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);

  if (arg_reg < NPARM_REGS)
    return handle_structs_in_regs (arg.mode, arg.type,
				   FIRST_PARM_REG + arg_reg);

  return 0;
}
static void
-mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+mcore_function_arg_advance (cumulative_args_t cum_v,
+ const function_arg_info &arg)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
- *cum = (ROUND_REG (*cum, mode)
- + (int)named * mcore_num_arg_regs (mode, type));
+ *cum = (ROUND_REG (*cum, arg.mode)
+ + (int) arg.named * mcore_num_arg_regs (arg.mode, arg.type));
}
static unsigned int
}
/* Returns the number of bytes of argument registers required to hold *part*
- of a parameter of machine mode MODE and type TYPE (which may be NULL if
- the type is not known). If the argument fits entirely in the argument
- registers, or entirely on the stack, then 0 is returned. CUM is the
- number of argument registers already used by earlier parameters to
- the function. */
+ of argument ARG. If the argument fits entirely in the argument registers,
+ or entirely on the stack, then 0 is returned. CUM is the number of
+ argument registers already used by earlier parameters to the function. */
static int
-mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
- tree type, bool named)
+mcore_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
{
- int reg = ROUND_REG (*get_cumulative_args (cum), mode);
+ int reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
- if (named == 0)
+ if (!arg.named)
return 0;
- if (targetm.calls.must_pass_in_stack (mode, type))
+ if (targetm.calls.must_pass_in_stack (arg))
return 0;
/* REG is not the *hardware* register number of the register that holds
return 0;
/* If the argument fits entirely in registers, return 0. */
- if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
+ if (reg + mcore_num_arg_regs (arg.mode, arg.type) <= NPARM_REGS)
return 0;
/* The argument overflows the number of available argument registers.
return false;
}
+/* Implement TARGET_HARD_REGNO_MODE_OK. We may keep double values in
+ even registers. */
+
+static bool
+mcore_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
+{
+ if (TARGET_8ALIGN && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return (regno & 1) == 0;
+
+ return regno < 18;
+}
+
+/* Implement TARGET_MODES_TIEABLE_P. */
+
+static bool
+mcore_modes_tieable_p (machine_mode mode1, machine_mode mode2)
+{
+ return mode1 == mode2 || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2);
+}