/* Output routines for Motorola MCore processor
- Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
- 2009, 2010 Free Software Foundation, Inc.
+ Copyright (C) 1993-2020 Free Software Foundation, Inc.
This file is part of GCC.
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
+#define IN_TARGET_CODE 1
+
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "df.h"
+#include "memmodel.h"
#include "tm_p.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "emit-rtl.h"
+#include "diagnostic-core.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "mcore.h"
-#include "regs.h"
-#include "hard-reg-set.h"
-#include "insn-config.h"
-#include "conditions.h"
#include "output.h"
-#include "insn-attr.h"
-#include "flags.h"
-#include "obstack.h"
+#include "explow.h"
#include "expr.h"
-#include "reload.h"
-#include "recog.h"
-#include "function.h"
-#include "ggc.h"
-#include "diagnostic-core.h"
-#include "target.h"
+#include "cfgrtl.h"
+#include "builtins.h"
+#include "regs.h"
+
+/* This file should be included last. */
#include "target-def.h"
-#include "df.h"
/* For dumping information about frame sizes. */
char * mcore_current_function_name = 0;
static void output_stack_adjust (int, int);
static int calc_live_regs (int *);
-static int try_constant_tricks (long, HOST_WIDE_INT *, HOST_WIDE_INT *);
-static const char * output_inline_const (enum machine_mode, rtx *);
+static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
+static const char * output_inline_const (machine_mode, rtx *);
static void layout_mcore_frame (struct mcore_frame *);
-static void mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
+static void mcore_setup_incoming_varargs (cumulative_args_t,
+ const function_arg_info &,
+ int *, int);
static cond_type is_cond_candidate (rtx);
-static rtx emit_new_cond_insn (rtx, int);
-static rtx conditionalize_block (rtx);
+static rtx_insn *emit_new_cond_insn (rtx_insn *, int);
+static rtx_insn *conditionalize_block (rtx_insn *);
static void conditionalize_optimization (void);
static void mcore_reorg (void);
-static rtx handle_structs_in_regs (enum machine_mode, const_tree, int);
+static rtx handle_structs_in_regs (machine_mode, const_tree, int);
static void mcore_mark_dllexport (tree);
static void mcore_mark_dllimport (tree);
static int mcore_dllexport_p (tree);
#ifdef OBJECT_FORMAT_ELF
static void mcore_asm_named_section (const char *,
                                     unsigned int, tree);
#endif
static void mcore_print_operand (FILE *, rtx, int);
-static void mcore_print_operand_address (FILE *, rtx);
+static void mcore_print_operand_address (FILE *, machine_mode, rtx);
static bool mcore_print_operand_punct_valid_p (unsigned char code);
static void mcore_unique_section (tree, int);
static void mcore_encode_section_info (tree, rtx, int);
static const char *mcore_strip_name_encoding (const char *);
-static int mcore_const_costs (rtx, RTX_CODE);
-static int mcore_and_cost (rtx);
-static int mcore_ior_cost (rtx);
-static bool mcore_rtx_costs (rtx, int, int, int *, bool);
+static int mcore_const_costs (rtx, RTX_CODE);
+static int mcore_and_cost (rtx);
+static int mcore_ior_cost (rtx);
+static bool mcore_rtx_costs (rtx, machine_mode, int, int,
+ int *, bool);
static void mcore_external_libcall (rtx);
static bool mcore_return_in_memory (const_tree, const_tree);
-static int mcore_arg_partial_bytes (CUMULATIVE_ARGS *,
- enum machine_mode,
- tree, bool);
-static rtx mcore_function_arg (CUMULATIVE_ARGS *,
- enum machine_mode,
- const_tree, bool);
-static void mcore_function_arg_advance (CUMULATIVE_ARGS *,
- enum machine_mode,
- const_tree, bool);
-static unsigned int mcore_function_arg_boundary (enum machine_mode,
+static int mcore_arg_partial_bytes (cumulative_args_t,
+ const function_arg_info &);
+static rtx mcore_function_arg (cumulative_args_t,
+ const function_arg_info &);
+static void mcore_function_arg_advance (cumulative_args_t,
+ const function_arg_info &);
+static unsigned int mcore_function_arg_boundary (machine_mode,
const_tree);
static void mcore_asm_trampoline_template (FILE *);
static void mcore_trampoline_init (rtx, tree, rtx);
+static bool mcore_warn_func_return (tree);
static void mcore_option_override (void);
+static bool mcore_legitimate_constant_p (machine_mode, rtx);
+static bool mcore_legitimate_address_p (machine_mode, rtx, bool,
+ addr_space_t);
+static bool mcore_hard_regno_mode_ok (unsigned int, machine_mode);
+static bool mcore_modes_tieable_p (machine_mode, machine_mode);
\f
/* MCore specific attributes. */
static const struct attribute_spec mcore_attribute_table[] =
{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
- affects_type_identity } */
- { "dllexport", 0, 0, true, false, false, NULL, false },
- { "dllimport", 0, 0, true, false, false, NULL, false },
- { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute,
- false },
- { NULL, 0, 0, false, false, false, NULL, false }
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
+ affects_type_identity, handler, exclude } */
+ { "dllexport", 0, 0, true, false, false, false, NULL, NULL },
+ { "dllimport", 0, 0, true, false, false, false, NULL, NULL },
+ { "naked", 0, 0, true, false, false, false,
+ mcore_handle_naked_attribute, NULL },
+ { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
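+
+/* Illustrative sketch, not part of this patch: with the table above a
+   user can write, for example,
+
+     extern void imported_fn (void) __attribute__ ((dllimport));
+     void exported_fn (void) __attribute__ ((dllexport));
+     void isr (void) __attribute__ ((naked));
+
+   "dllexport"/"dllimport" are acted on via mcore_mark_dllexport and
+   mcore_mark_dllimport below, while "naked" suppresses the normal
+   prologue/epilogue (see mcore_handle_naked_attribute).  */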
-
-/* What options are we going to default to specific settings when
- -O* happens; the user can subsequently override these settings.
-
- Omitting the frame pointer is a very good idea on the MCore.
- Scheduling isn't worth anything on the current MCore implementation. */
-
-static const struct default_options mcore_option_optimization_table[] =
- {
- { OPT_LEVELS_1_PLUS, OPT_ffunction_cse, NULL, 0 },
- { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
- { OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
- { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
- { OPT_LEVELS_ALL, OPT_fschedule_insns2, NULL, 0 },
- { OPT_LEVELS_SIZE, OPT_mhardlit, NULL, 0 },
- { OPT_LEVELS_NONE, 0, NULL, 0 }
- };
\f
/* Initialize the GCC target structure. */
#undef TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
-#undef TARGET_DEFAULT_TARGET_FLAGS
-#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mcore_rtx_costs
#undef TARGET_ADDRESS_COST
-#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
+#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mcore_option_override
-#undef TARGET_OPTION_OPTIMIZATION_TABLE
-#define TARGET_OPTION_OPTIMIZATION_TABLE mcore_option_optimization_table
-#undef TARGET_EXCEPT_UNWIND_INFO
-#define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
+#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
+#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p
+
+#undef TARGET_LRA_P
+#define TARGET_LRA_P hook_bool_void_false
+
+#undef TARGET_WARN_FUNC_RETURN
+#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
+
+#undef TARGET_HARD_REGNO_MODE_OK
+#define TARGET_HARD_REGNO_MODE_OK mcore_hard_regno_mode_ok
+
+#undef TARGET_MODES_TIEABLE_P
+#define TARGET_MODES_TIEABLE_P mcore_modes_tieable_p
+
+#undef TARGET_CONSTANT_ALIGNMENT
+#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
+
+#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
+#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
struct gcc_target targetm = TARGET_INITIALIZER;
\f
for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
{
- if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
+ if (df_regs_ever_live_p (reg) && !call_used_or_fixed_reg_p (reg))
{
(*count)++;
live_regs_mask |= (1 << reg);
/* Print the operand address in x to the stream. */
static void
-mcore_print_operand_address (FILE * stream, rtx x)
+mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
{
switch (GET_CODE (x))
{
break;
case MEM:
mcore_print_operand_address
- (stream, XEXP (adjust_address (x, SImode, 4), 0));
+ (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
break;
default:
gcc_unreachable ();
fputs (reg_names[REGNO (x)], (stream));
break;
case MEM:
- output_address (XEXP (x, 0));
+ output_address (GET_MODE (x), XEXP (x, 0));
break;
default:
output_addr_const (stream, x);
}
static bool
-mcore_rtx_costs (rtx x, int code, int outer_code, int * total,
- bool speed ATTRIBUTE_UNUSED)
+mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
+ int opno ATTRIBUTE_UNUSED,
+ int * total, bool speed ATTRIBUTE_UNUSED)
{
+ int code = GET_CODE (x);
+
switch (code)
{
case CONST_INT:
case EQ: /* Use inverted condition, cmpne. */
code = NE;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case NE: /* Use normal condition, cmpne. */
if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
case LE: /* Use inverted condition, reversed cmplt. */
code = GT;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case GT: /* Use normal condition, reversed cmplt. */
if (GET_CODE (op1) == CONST_INT)
case GE: /* Use inverted condition, cmplt. */
code = LT;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case LT: /* Use normal condition, cmplt. */
if (GET_CODE (op1) == CONST_INT &&
gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
code = LEU;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case LEU: /* Use normal condition, reversed cmphs. */
if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
case LTU: /* Use inverted condition, cmphs. */
code = GEU;
invert = true;
- /* Drop through. */
+ /* FALLTHRU */
case GEU: /* Use normal condition, cmphs. */
if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
break;
}
- emit_insn (gen_rtx_SET (VOIDmode,
- cc_reg,
- gen_rtx_fmt_ee (code, CCmode, op0, op1)));
+ emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
return invert;
}
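
/* Illustrative sketch, not part of this patch: MCore only provides
   cmpne, cmplt and cmphs (and their immediate forms), so the routine
   above emits the opposite test and tells the caller to invert the
   branch sense.  For example "if (x == 5)" becomes roughly

     cmpnei  rX,5
     jbt     .Lskip_then

   i.e. the branch is taken when x != 5 and the then-block runs on the
   fall-through path.  */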
can ignore subregs by extracting the actual register. BRC */
int
-mcore_is_dead (rtx first, rtx reg)
+mcore_is_dead (rtx_insn *first, rtx reg)
{
- rtx insn;
+ rtx_insn *insn;
/* For mcore, subregs can't live independently of their parent regs. */
if (GET_CODE (reg) == SUBREG)
to assume that it is live. */
for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
return 0; /* We lose track, assume it is alive. */
- else if (GET_CODE(insn) == CALL_INSN)
+ else if (CALL_P (insn))
{
/* Calls might use it for target or register parms. */
if (reg_referenced_p (reg, PATTERN (insn))
else if (dead_or_set_p (insn, reg))
return 1;
}
- else if (GET_CODE (insn) == INSN)
+ else if (NONJUMP_INSN_P (insn))
{
if (reg_referenced_p (reg, PATTERN (insn)))
return 0;
/* Output an inline constant. */
static const char *
-output_inline_const (enum machine_mode mode, rtx operands[])
+output_inline_const (machine_mode mode, rtx operands[])
{
HOST_WIDE_INT x = 0, y = 0;
int trick_no;
const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
- enum machine_mode mode ATTRIBUTE_UNUSED)
+ machine_mode mode ATTRIBUTE_UNUSED)
{
rtx dst = operands[0];
rtx src = operands[1];
else
switch (GET_MODE (src)) /* r-m */
{
- case SImode:
+ case E_SImode:
return "ldw\t%0,%1";
- case HImode:
+ case E_HImode:
return "ld.h\t%0,%1";
- case QImode:
+ case E_QImode:
return "ld.b\t%0,%1";
default:
gcc_unreachable ();
else if (GET_CODE (dst) == MEM) /* m-r */
switch (GET_MODE (dst))
{
- case SImode:
+ case E_SImode:
return "stw\t%1,%0";
- case HImode:
+ case E_HImode:
return "st.h\t%1,%0";
- case QImode:
+ case E_QImode:
return "st.b\t%1,%0";
default:
gcc_unreachable ();
to take care when we see overlapping source and dest registers. */
const char *
-mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
+mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
{
rtx dst = operands[0];
rtx src = operands[1];
}
else if (GET_CODE (src) == MEM)
{
- rtx memexp = memexp = XEXP (src, 0);
+ rtx memexp = XEXP (src, 0);
int dstreg = REGNO (dst);
int basereg = -1;
if ((INTVAL (operands[3]) & 1) == 0)
{
mask = ~(1 << posn);
- emit_insn (gen_rtx_SET (SImode, operands[0],
- gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
+ emit_insn (gen_rtx_SET (operands[0],
+ gen_rtx_AND (SImode, operands[0],
+ GEN_INT (mask))));
}
else
{
mask = 1 << posn;
- emit_insn (gen_rtx_SET (SImode, operands[0],
- gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
+ emit_insn (gen_rtx_SET (operands[0],
+ gen_rtx_IOR (SImode, operands[0],
+ GEN_INT (mask))));
}
return 1;
&& INTVAL (operands[3]) == ((1 << width) - 1))
{
mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
- emit_insn (gen_rtx_SET (SImode, operands[0],
- gen_rtx_IOR (SImode, operands[0], mreg)));
+ emit_insn (gen_rtx_SET (operands[0],
+ gen_rtx_IOR (SImode, operands[0], mreg)));
return 1;
}
mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
/* Clear the field, to overlay it later with the source. */
- emit_insn (gen_rtx_SET (SImode, operands[0],
- gen_rtx_AND (SImode, operands[0], mreg)));
+ emit_insn (gen_rtx_SET (operands[0],
+ gen_rtx_AND (SImode, operands[0], mreg)));
/* If the source is constant 0, we've nothing to add back. */
if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
if (width + posn != (int) GET_MODE_SIZE (SImode))
{
ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
- emit_insn (gen_rtx_SET (SImode, sreg,
- gen_rtx_AND (SImode, sreg, ereg)));
+ emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
}
/* Insert source value in dest. */
if (posn != 0)
- emit_insn (gen_rtx_SET (SImode, sreg,
- gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));
+ emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
+ GEN_INT (posn))));
- emit_insn (gen_rtx_SET (SImode, operands[0],
- gen_rtx_IOR (SImode, operands[0], sreg)));
+ emit_insn (gen_rtx_SET (operands[0],
+ gen_rtx_IOR (SImode, operands[0], sreg)));
return 1;
}
known constants. DEST and SRC are registers. OFFSET is the known
starting point for the output pattern. */
-static const enum machine_mode mode_from_align[] =
+static const machine_mode mode_from_align[] =
{
VOIDmode, QImode, HImode, VOIDmode, SImode,
};
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
rtx temp[2];
- enum machine_mode mode[2];
+ machine_mode mode[2];
int amount[2];
bool active[2];
int phase = 0;
temp[next] = gen_reg_rtx (mode[next]);
x = adjust_address (src_mem, mode[next], offset_ld);
- emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));
+ emit_insn (gen_rtx_SET (temp[next], x));
offset_ld += next_amount;
size -= next_amount;
active[phase] = false;
x = adjust_address (dst_mem, mode[phase], offset_st);
- emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));
+ emit_insn (gen_rtx_SET (x, temp[phase]));
offset_st += amount[phase];
}
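
/* Illustrative sketch, not part of this patch: for a 7-byte copy of
   4-byte-aligned operands the phased loop above would emit an SImode
   load/store for offset 0, an HImode pair for offset 4 and a QImode
   pair for offset 6, interleaving the load of each chunk with the
   store of the previous one via the two temporaries.  */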
/* Keep track of some information about varargs for the prolog. */
static void
-mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
- enum machine_mode mode, tree type,
+mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
+ const function_arg_info &arg,
int * ptr_pretend_size ATTRIBUTE_UNUSED,
int second_time ATTRIBUTE_UNUSED)
{
+ CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
+
current_function_anonymous_args = 1;
/* We need to know how many argument registers are used before
the varargs start, so that we can push the remaining argument
registers during the prologue. */
- number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
+ number_of_regs_before_varargs
+ = *args_so_far + mcore_num_arg_regs (arg.mode, arg.type);
/* There is a bug somewhere in the arg handling code.
Until I can find it this workaround always pushes the
gcc_assert (GET_CODE (x) == SYMBOL_REF);
- if (mcore_current_function_name)
- free (mcore_current_function_name);
+ free (mcore_current_function_name);
mcore_current_function_name = xstrdup (XSTR (x, 0));
{
emit_insn (gen_movsi
(gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, offset)),
+ plus_constant (Pmode, stack_pointer_rtx,
+ offset)),
gen_rtx_REG (SImode, rn)));
}
}
{
emit_insn (gen_movsi
(gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, offs)),
+ plus_constant (Pmode, stack_pointer_rtx,
+ offs)),
gen_rtx_REG (SImode, i)));
offs += 4;
}
emit_insn (gen_movsi
(gen_rtx_REG (SImode, i),
gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, offs))));
+ plus_constant (Pmode, stack_pointer_rtx,
+ offs))));
offs += 4;
}
}
changed into a conditional. Only bother with SImode items. If
we wanted to be a little more aggressive, we could also do other
modes such as DImode with reg-reg move or load 0. */
- if (GET_CODE (insn) == INSN)
+ if (NONJUMP_INSN_P (insn))
{
rtx pat = PATTERN (insn);
rtx src, dst;
*/
}
- else if (GET_CODE (insn) == JUMP_INSN &&
- GET_CODE (PATTERN (insn)) == SET &&
- GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
+ else if (JUMP_P (insn)
+ && GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
return COND_BRANCH_INSN;
return COND_NO;
/* Emit a conditional version of insn and replace the old insn with the
new one. Return the new insn if emitted. */
-static rtx
-emit_new_cond_insn (rtx insn, int cond)
+static rtx_insn *
+emit_new_cond_insn (rtx_insn *insn, int cond)
{
rtx c_insn = 0;
rtx pat, dst, src;
pat = PATTERN (insn);
- if (GET_CODE (insn) == INSN)
+ if (NONJUMP_INSN_P (insn))
{
dst = SET_DEST (pat);
src = SET_SRC (pat);
delete_insn (insn);
- return c_insn;
+ return as_a <rtx_insn *> (c_insn);
}
/* Attempt to change a basic block into a series of conditional insns. This
we can delete the L2 label if NUSES==1 and re-apply the optimization
starting at the last instruction of block 2. This may allow an entire
if-then-else statement to be conditionalized. BRC */
-static rtx
-conditionalize_block (rtx first)
+static rtx_insn *
+conditionalize_block (rtx_insn *first)
{
- rtx insn;
+ rtx_insn *insn;
rtx br_pat;
- rtx end_blk_1_br = 0;
- rtx end_blk_2_insn = 0;
- rtx start_blk_3_lab = 0;
+ rtx_insn *end_blk_1_br = 0;
+ rtx_insn *end_blk_2_insn = 0;
+ rtx_insn *start_blk_3_lab = 0;
int cond;
int br_lab_num;
int blk_size = 0;
/* Check that the first insn is a candidate conditional jump. This is
the one that we'll eliminate. If not, advance to the next insn to
try. */
- if (GET_CODE (first) != JUMP_INSN ||
- GET_CODE (PATTERN (first)) != SET ||
- GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
+ if (! JUMP_P (first)
+ || GET_CODE (PATTERN (first)) != SET
+ || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
return NEXT_INSN (first);
/* Extract some information we need. */
for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
insn = NEXT_INSN (insn))
{
- rtx newinsn;
+ rtx_insn *newinsn;
- if (INSN_DELETED_P (insn))
+ if (insn->deleted ())
continue;
/* Try to form a conditional variant of the instruction and emit it. */
static void
conditionalize_optimization (void)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = get_insns (); insn; insn = conditionalize_block (insn))
continue;
}
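
/* Illustrative sketch, not part of this patch: the pass above targets
   MCore's conditionally executed instructions (movt/movf, clrt/clrf,
   inct/incf, dect/decf and friends).  A short forward branch over a
   single move, e.g.

       jbf   .L1
       mov   r2,r3
     .L1:

   can thus be rewritten as the single predicated instruction
   "movt r2,r3", deleting both the branch and, if unused, the label.  */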
-static int saved_warn_return_type = -1;
-static int saved_warn_return_type_count = 0;
-
/* This is to handle loads from the constant pool. */
static void
/* Reset this variable. */
current_function_anonymous_args = 0;
- /* Restore the warn_return_type if it has been altered. */
- if (saved_warn_return_type != -1)
- {
- /* Only restore the value if we have reached another function.
- The test of warn_return_type occurs in final_function () in
- c-decl.c a long time after the code for the function is generated,
- so we need a counter to tell us when we have finished parsing that
- function and can restore the flag. */
- if (--saved_warn_return_type_count == 0)
- {
- warn_return_type = saved_warn_return_type;
- saved_warn_return_type = -1;
- }
- }
-
if (optimize == 0)
return;
enum reg_class
mcore_secondary_reload_class (enum reg_class rclass,
- enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+ machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
&& !mcore_r15_operand_p (x))
hold a function argument of mode MODE and type TYPE. */
int
-mcore_num_arg_regs (enum machine_mode mode, const_tree type)
+mcore_num_arg_regs (machine_mode mode, const_tree type)
{
int size;
- if (targetm.calls.must_pass_in_stack (mode, type))
+ function_arg_info arg (const_cast<tree> (type), mode, /*named=*/true);
+ if (targetm.calls.must_pass_in_stack (arg))
return 0;
if (type && mode == BLKmode)
}
static rtx
-handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
+handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
{
int size;
rtx
mcore_function_value (const_tree valtype, const_tree func)
{
- enum machine_mode mode;
+ machine_mode mode;
int unsigned_p;
mode = TYPE_MODE (valtype);
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
- MODE is the argument's machine mode.
- TYPE is the data type of the argument (as a tree).
- This is null for libcalls where that information may
- not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
the preceding args and about the function being called.
- NAMED is nonzero if this argument is a named parameter
- (otherwise it is an extra parameter matching an ellipsis).
+ ARG is a description of the argument.
On MCore the first args are normally in registers
and the rest are pushed. Any arg that starts within the first
its data type forbids. */
static rtx
-mcore_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- const_tree type, bool named)
+mcore_function_arg (cumulative_args_t cum, const function_arg_info &arg)
{
int arg_reg;
- if (! named || mode == VOIDmode)
+ if (!arg.named || arg.end_marker_p ())
return 0;
- if (targetm.calls.must_pass_in_stack (mode, type))
+ if (targetm.calls.must_pass_in_stack (arg))
return 0;
- arg_reg = ROUND_REG (*cum, mode);
+ arg_reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
if (arg_reg < NPARM_REGS)
- return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
+ return handle_structs_in_regs (arg.mode, arg.type,
+ FIRST_PARM_REG + arg_reg);
return 0;
}
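
/* Illustrative sketch, not part of this patch: with the usual
   FIRST_PARM_REG/NPARM_REGS values the first six argument words go in
   r2..r7, so for "void f (int a, int b, int c)" the three arguments
   land in r2, r3 and r4.  An aggregate that starts in one of the last
   parameter registers but does not fit is split, with
   mcore_arg_partial_bytes below reporting how much of it was passed in
   registers.  */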
static void
-mcore_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+mcore_function_arg_advance (cumulative_args_t cum_v,
+ const function_arg_info &arg)
{
- *cum = (ROUND_REG (*cum, mode)
- + (int)named * mcore_num_arg_regs (mode, type));
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
+ *cum = (ROUND_REG (*cum, arg.mode)
+ + (int) arg.named * mcore_num_arg_regs (arg.mode, arg.type));
}
static unsigned int
-mcore_function_arg_boundary (enum machine_mode mode,
+mcore_function_arg_boundary (machine_mode mode,
const_tree type ATTRIBUTE_UNUSED)
{
/* Doubles must be aligned to an 8 byte boundary. */
}
/* Returns the number of bytes of argument registers required to hold *part*
- of a parameter of machine mode MODE and type TYPE (which may be NULL if
- the type is not known). If the argument fits entirely in the argument
- registers, or entirely on the stack, then 0 is returned. CUM is the
- number of argument registers already used by earlier parameters to
- the function. */
+ of argument ARG. If the argument fits entirely in the argument registers,
+ or entirely on the stack, then 0 is returned. CUM is the number of
+ argument registers already used by earlier parameters to the function. */
static int
-mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- tree type, bool named)
+mcore_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
{
- int reg = ROUND_REG (*cum, mode);
+ int reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
- if (named == 0)
+ if (!arg.named)
return 0;
- if (targetm.calls.must_pass_in_stack (mode, type))
+ if (targetm.calls.must_pass_in_stack (arg))
return 0;
/* REG is not the *hardware* register number of the register that holds
return 0;
/* If the argument fits entirely in registers, return 0. */
- if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
+ if (reg + mcore_num_arg_regs (arg.mode, arg.type) <= NPARM_REGS)
return 0;
/* The argument overflows the number of available argument registers.
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
{
- if (TREE_CODE (*node) == FUNCTION_DECL)
- {
- /* PR14310 - don't complain about lack of return statement
- in naked functions. The solution here is a gross hack
- but this is the only way to solve the problem without
- adding a new feature to GCC. I did try submitting a patch
- that would add such a new feature, but it was (rightfully)
- rejected on the grounds that it was creeping featurism,
- so hence this code. */
- if (warn_return_type)
- {
- saved_warn_return_type = warn_return_type;
- warn_return_type = 0;
- saved_warn_return_type_count = 2;
- }
- else if (saved_warn_return_type_count)
- saved_warn_return_type_count = 2;
- }
- else
+ if (TREE_CODE (*node) != FUNCTION_DECL)
{
warning (OPT_Wattributes, "%qE attribute only applies to functions",
name);
sprintf (string, "%s%s", prefix, name);
- DECL_SECTION_NAME (decl) = build_string (len, string);
+ set_decl_section_name (decl, string);
}
int
return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
}
+static bool
+mcore_warn_func_return (tree decl)
+{
+ /* Naked functions are implemented entirely in assembly, including the
+ return sequence, so suppress warnings about this. */
+ return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
+}
+
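+/* Illustrative sketch, not part of this patch: a "naked" function
+   provides its own return sequence in asm, for example
+
+     int __attribute__ ((naked))
+     get_magic (void)
+     {
+       __asm__ ("movi\tr2,42\n\tjmp\tr15");
+     }
+
+   which puts the result in r2 and returns via r15 by hand; the hook
+   above keeps -Wreturn-type from warning that control appears to fall
+   off the end of it.  */
+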
#ifdef OBJECT_FORMAT_ELF
static void
mcore_asm_named_section (const char *name,
mem = adjust_address (m_tramp, SImode, 12);
emit_move_insn (mem, fnaddr);
}
+
+/* Implement TARGET_LEGITIMATE_CONSTANT_P
+
+ On the MCore, allow anything but a double. */
+
+static bool
+mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+ return GET_CODE (x) != CONST_DOUBLE;
+}
+
+/* Helper function for `mcore_legitimate_address_p'. */
+
+static bool
+mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
+{
+ if (strict_p)
+ return REGNO_OK_FOR_BASE_P (REGNO (reg));
+ else
+ return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
+}
+
+static bool
+mcore_base_register_rtx_p (const_rtx x, bool strict_p)
+{
+ return REG_P(x) && mcore_reg_ok_for_base_p (x, strict_p);
+}
+
+/* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
+ and for DI is 0..56 because we use two SI loads, etc. */
+
+static bool
+mcore_legitimate_index_p (machine_mode mode, const_rtx op)
+{
+ if (CONST_INT_P (op))
+ {
+ if (GET_MODE_SIZE (mode) >= 4
+ && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
+ && ((unsigned HOST_WIDE_INT) INTVAL (op))
+ <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
+ return true;
+ if (GET_MODE_SIZE (mode) == 2
+ && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
+ && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
+ return true;
+ if (GET_MODE_SIZE (mode) == 1
+ && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
+ return true;
+ }
+ return false;
+}
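+
+/* Illustrative sketch, not part of this patch: these ranges mirror the
+   scaled 4-bit displacement of the MCore load/store encodings, so
+   "ldw rX,(rY,60)" or "ld.b rX,(rY,15)" are directly encodable, while
+   an SImode access at offset 64, or at a misaligned offset such as 6,
+   fails this predicate and has to be addressed through a separately
+   computed base register instead.  */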
+
+
+/* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.
+
+ Allow REG
+ REG + disp */
+
+static bool
+mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
+ addr_space_t as)
+{
+ gcc_assert (ADDR_SPACE_GENERIC_P (as));
+
+ if (mcore_base_register_rtx_p (x, strict_p))
+ return true;
+ else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
+ {
+ rtx xop0 = XEXP (x, 0);
+ rtx xop1 = XEXP (x, 1);
+ if (mcore_base_register_rtx_p (xop0, strict_p)
+ && mcore_legitimate_index_p (mode, xop1))
+ return true;
+ if (mcore_base_register_rtx_p (xop1, strict_p)
+ && mcore_legitimate_index_p (mode, xop0))
+ return true;
+ }
+
+ return false;
+}
+
+/* Implement TARGET_HARD_REGNO_MODE_OK. We may keep double values in
+ even registers. */
+
+static bool
+mcore_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
+{
+ if (TARGET_8ALIGN && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return (regno & 1) == 0;
+
+ return regno < 18;
+}
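+
+/* Illustrative sketch, not part of this patch: under TARGET_8ALIGN a
+   value wider than a word must start in an even register, so DImode is
+   accepted in r2/r3 (regno 2) but rejected when starting at r3
+   (regno 3), while single-word modes are accepted in any hard register
+   below 18.  */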
+
+/* Implement TARGET_MODES_TIEABLE_P. */
+
+static bool
+mcore_modes_tieable_p (machine_mode mode1, machine_mode mode2)
+{
+ return mode1 == mode2 || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2);
+}