/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993-2021 Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
Improved by Jim Wilson (wilson@cygnus.com).
#include <sstream>
+#define IN_TARGET_CODE 1
+
#include "config.h"
#define INCLUDE_VECTOR
#include "system.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "regs.h"
+#include "toplev.h"
/* This file should be included last. */
#include "target-def.h"
HOST_WIDE_INT, tree);
static void sh_file_start (void);
static bool sh_assemble_integer (rtx, unsigned int, int);
-static bool flow_dependent_p (rtx, rtx);
+static bool flow_dependent_p (rtx_insn *, rtx_insn *);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int and_xor_ior_costs (rtx, int);
static rtx sh_legitimize_address (rtx, rtx, machine_mode);
static rtx sh_delegitimize_address (rtx);
static bool sh_cannot_substitute_mem_equiv_p (rtx);
-static bool sh_legitimize_address_displacement (rtx *, rtx *, machine_mode);
+static bool sh_legitimize_address_displacement (rtx *, rtx *,
+ poly_int64, machine_mode);
static int scavenge_reg (HARD_REG_SET *s);
static rtx sh_struct_value_rtx (tree, int);
static rtx sh_libcall_value (machine_mode, const_rtx);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
-static void sh_setup_incoming_varargs (cumulative_args_t, machine_mode,
- tree, int *, int);
+static void sh_setup_incoming_varargs (cumulative_args_t,
+ const function_arg_info &, int *, int);
static bool sh_strict_argument_naming (cumulative_args_t);
static bool sh_pretend_outgoing_varargs_named (cumulative_args_t);
static void sh_atomic_assign_expand_fenv (tree *, tree *, tree *);
int *punsignedp,
const_tree funtype,
int for_return);
-static bool sh_pass_by_reference (cumulative_args_t, machine_mode,
- const_tree, bool);
-static bool sh_callee_copies (cumulative_args_t, machine_mode,
- const_tree, bool);
-static int sh_arg_partial_bytes (cumulative_args_t, machine_mode,
- tree, bool);
-static void sh_function_arg_advance (cumulative_args_t, machine_mode,
- const_tree, bool);
-static rtx sh_function_arg (cumulative_args_t, machine_mode,
- const_tree, bool);
+static bool sh_pass_by_reference (cumulative_args_t,
+ const function_arg_info &);
+static bool sh_callee_copies (cumulative_args_t, const function_arg_info &);
+static int sh_arg_partial_bytes (cumulative_args_t, const function_arg_info &);
+static void sh_function_arg_advance (cumulative_args_t,
+ const function_arg_info &);
+static rtx sh_function_arg (cumulative_args_t, const function_arg_info &);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static bool sh2a_function_vector_p (tree);
\f
static const struct attribute_spec sh_attribute_table[] =
{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
- affects_type_identity, exclusions } */
- { "interrupt_handler", 0, 0, true, false, false,
- sh_handle_interrupt_handler_attribute, false, NULL },
- { "sp_switch", 1, 1, true, false, false,
- sh_handle_sp_switch_attribute, false, NULL },
- { "trap_exit", 1, 1, true, false, false,
- sh_handle_trap_exit_attribute, false, NULL },
- { "renesas", 0, 0, false, true, false,
- sh_handle_renesas_attribute, false, NULL },
- { "trapa_handler", 0, 0, true, false, false,
- sh_handle_interrupt_handler_attribute, false, NULL },
- { "nosave_low_regs", 0, 0, true, false, false,
- sh_handle_interrupt_handler_attribute, false, NULL },
- { "resbank", 0, 0, true, false, false,
- sh_handle_resbank_handler_attribute, false, NULL },
- { "function_vector", 1, 1, true, false, false,
- sh2a_handle_function_vector_handler_attribute, false, NULL },
- { NULL, 0, 0, false, false, false, NULL, false, NULL }
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
+ affects_type_identity, handler, exclude } */
+ { "interrupt_handler", 0, 0, true, false, false, false,
+ sh_handle_interrupt_handler_attribute, NULL },
+ { "sp_switch", 1, 1, true, false, false, false,
+ sh_handle_sp_switch_attribute, NULL },
+ { "trap_exit", 1, 1, true, false, false, false,
+ sh_handle_trap_exit_attribute, NULL },
+ { "renesas", 0, 0, false, true, false, false,
+ sh_handle_renesas_attribute, NULL },
+ { "trapa_handler", 0, 0, true, false, false, false,
+ sh_handle_interrupt_handler_attribute, NULL },
+ { "nosave_low_regs", 0, 0, true, false, false, false,
+ sh_handle_interrupt_handler_attribute, NULL },
+ { "resbank", 0, 0, true, false, false, false,
+ sh_handle_resbank_handler_attribute, NULL },
+ { "function_vector", 1, 1, true, false, false, false,
+ sh2a_handle_function_vector_handler_attribute, NULL },
+ { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
/* Initialize the GCC target structure. */
#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
+#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
+#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
{
if (tokens[i] == "strict")
ret.strict = true;
- else if (tokens[i].find ("gbr-offset=") == 0)
+ else if (!tokens[i].compare (0, strlen ("gbr-offset="), "gbr-offset="))
{
std::string offset_str = tokens[i].substr (strlen ("gbr-offset="));
ret.tcb_gbr_offset = integral_argument (offset_str.c_str ());
/* Run sh_treg_combine pass after register allocation and basic block
reordering as this sometimes creates new opportunities. */
register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine3"),
- PASS_POS_INSERT_AFTER, "split4", 1);
+ PASS_POS_INSERT_AFTER, "split3", 1);
/* Optimize sett and clrt insns, by e.g. removing them if the T bit value
is known after a conditional branch.
to the pressure on R0. */
/* Enable sched1 for SH4 if the user explicitly requests.
When sched1 is enabled, the ready queue will be reordered by
- the target hooks if pressure is high. We can not do this for
+ the target hooks if pressure is high. We cannot do this for
PIC, SH3 and lower as they give spill failures for R0. */
if (!TARGET_HARD_SH4 || flag_pic)
flag_schedule_insns = 0;
else if (flag_exceptions)
{
if (flag_schedule_insns && global_options_set.x_flag_schedule_insns)
- warning (0, "ignoring -fschedule-insns because of exception "
+ warning (0, "ignoring %<-fschedule-insns%> because of exception "
"handling bug");
flag_schedule_insns = 0;
}
&& flag_omit_frame_pointer && !TARGET_ACCUMULATE_OUTGOING_ARGS)
{
warning (0, "unwind tables currently require either a frame pointer "
- "or -maccumulate-outgoing-args for correctness");
+ "or %<-maccumulate-outgoing-args%> for correctness");
TARGET_ACCUMULATE_OUTGOING_ARGS = 1;
}
if (flag_unsafe_math_optimizations)
{
/* Enable fsca insn for SH4A if not otherwise specified by the user. */
- if (global_options_set.x_TARGET_FSCA == 0 && TARGET_SH4A_FP)
+ if (global_options_set.x_TARGET_FSCA == 0
+ && (TARGET_SH4A_FP || TARGET_FPU_SH4_300))
TARGET_FSCA = 1;
/* Enable fsrra insn for SH4A if not otherwise specified by the user. */
- if (global_options_set.x_TARGET_FSRRA == 0 && TARGET_SH4A_FP)
+ if (global_options_set.x_TARGET_FSRRA == 0
+ && (TARGET_SH4A_FP || TARGET_FPU_SH4_300))
TARGET_FSRRA = 1;
}
Aligning all jumps increases the code size, even if it might
result in slightly faster code. Thus, it is set to the smallest
alignment possible if not specified by the user. */
- if (align_loops == 0)
- align_loops = optimize_size ? 2 : 4;
+ if (flag_align_loops && !str_align_loops)
+ str_align_loops = optimize_size ? "2" : "4";
- if (align_jumps == 0)
- align_jumps = 2;
- else if (align_jumps < 2)
- align_jumps = 2;
+ /* Parse values so that we can compare for current value. */
+ parse_alignment_opts ();
+ if (flag_align_jumps && !str_align_jumps)
+ str_align_jumps = "2";
+ else if (align_jumps.levels[0].get_value () < 2)
+ str_align_jumps = "2";
- if (align_functions == 0)
- align_functions = optimize_size ? 2 : 4;
+ if (flag_align_functions && !str_align_functions)
+ str_align_functions = optimize_size ? "2" : "4";
/* The linker relaxation code breaks when a function contains
alignments that are larger than that at the start of a
compilation unit. */
if (TARGET_RELAX)
{
- int min_align = align_loops > align_jumps ? align_loops : align_jumps;
+ /* Parse values so that we can compare for current value. */
+ parse_alignment_opts ();
+ int min_align = MAX (align_loops.levels[0].get_value (),
+ align_jumps.levels[0].get_value ());
/* Also take possible .long constants / mova tables into account. */
if (min_align < 4)
min_align = 4;
- if (align_functions < min_align)
- align_functions = min_align;
+ if (align_functions.levels[0].get_value () < min_align)
+ {
+ char *r = XNEWVEC (char, 16);
+ sprintf (r, "%d", min_align);
+ str_align_functions = r;
+ }
}
}
\f
lsw_taken = EQ;
if (prob.initialized_p ())
{
- /* FIXME: This is not optimal. We do not really know the probablity
+ /* FIXME: This is not optimal. We do not really know the probability
that values differ by MCW only, but we should probably distribute
probabilities more evenly. */
msw_skip_prob = rev_prob;
{
rtx_insn *scan = barrier;
bool need_align = true;
- rtx lab;
+ rtx_code_label *lab;
label_ref_list_t ref;
bool have_df = false;
scan = emit_insn_after (gen_align_2 (), scan);
need_align = false;
}
- for (lab = p->label; lab; lab = LABEL_REFS (lab))
+ for (lab = p->label; lab;
+ lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
scan = emit_label_after (lab, scan);
scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
scan);
rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
- scan = emit_label_after (lab, scan);
+ scan = emit_label_after (as_a <rtx_insn *> (lab), scan);
}
}
if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
case E_SFmode:
if (align_insn && !p->part_of_sequence_p)
{
- for (lab = p->label; lab; lab = LABEL_REFS (lab))
+ for (lab = p->label; lab;
+ lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
emit_label_before (lab, align_insn);
emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
align_insn);
}
else
{
- for (lab = p->label; lab; lab = LABEL_REFS (lab))
+ for (lab = p->label; lab;
+ lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
scan = emit_label_after (lab, scan);
scan = emit_insn_after (gen_consttable_4 (p->value,
const0_rtx), scan);
}
/* FALLTHRU */
case E_DImode:
- for (lab = p->label; lab; lab = LABEL_REFS (lab))
+ for (lab = p->label; lab;
+ lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
scan = emit_label_after (lab, scan);
scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
scan);
scan = emit_label_after (gen_label_rtx (), scan);
scan = emit_insn_after (gen_align_4 (), scan);
}
- for (lab = p->label; lab; lab = LABEL_REFS (lab))
+ for (lab = p->label; lab;
+ lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
scan = emit_label_after (lab, scan);
scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
scan);
scan = emit_label_after (gen_label_rtx (), scan);
scan = emit_insn_after (gen_align_4 (), scan);
}
- for (lab = p->label; lab; lab = LABEL_REFS (lab))
+ for (lab = p->label; lab;
+ lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
scan = emit_label_after (lab, scan);
scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
scan);
&& CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
{
if (optimize)
- new_align = 1 << label_to_alignment (from);
+ new_align = 1 << label_to_alignment (from).levels[0].log;
else if (BARRIER_P (prev_nonnote_insn (from)))
new_align = 1 << barrier_align (from);
else
&& (prev_nonnote_insn (from)
== XEXP (MOVA_LABELREF (mova), 0))))
num_mova--;
- if (barrier_align (next_real_insn (from)) == align_jumps_log)
+ if (barrier_align (next_real_insn (from)) == align_jumps.levels[0].log)
{
/* We have just passed the barrier in front of the
ADDR_DIFF_VEC, which is stored in found_barrier. Since
around the constant pool table will be hit. Putting it before
a jump makes it more likely that the bra delay slot will be
filled. */
- while (NOTE_P (from) || JUMP_P (from)
- || LABEL_P (from))
+ while (NOTE_P (from) || JUMP_P (from) || LABEL_P (from))
from = PREV_INSN (from);
- /* Make sure we do not split between a call and its corresponding
- CALL_ARG_LOCATION note. */
if (CALL_P (from))
{
- rtx_insn *next = NEXT_INSN (from);
- if (next && NOTE_P (next)
- && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
- from = next;
+ bool sibcall_p = SIBLING_CALL_P (from);
+
+ /* If FROM was a sibling call, then we know that control
+ will not return. In fact, we were guaranteed to hit
+ a barrier before another real insn.
+
+ The jump around the constant pool is unnecessary. It
+ costs space, but more importantly it confuses dwarf2cfi
+ generation. */
+ if (sibcall_p)
+ return emit_barrier_after (from);
}
from = emit_jump_insn_after (gen_jump (label), from);
/* Emit the reference label of the braf where it belongs, right after
the casesi_jump_2 (i.e. braf). */
braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
- emit_label_after (braf_label, prev);
+ emit_label_after (as_a <rtx_insn *> (braf_label), prev);
/* Fix up the ADDR_DIF_VEC to be relative
to the reference address of the braf. */
return ((optimize_size
|| ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
<= (unsigned) 1 << (CACHE_LOG - 2)))
- ? 1 : align_jumps_log);
+ ? 1 : align_jumps.levels[0].log);
}
rtx_insn *next = next_active_insn (barrier_or_label);
return 0;
if (! TARGET_SH2 || ! optimize)
- return align_jumps_log;
+ return align_jumps.levels[0].log;
/* When fixing up pcloads, a constant table might be inserted just before
the basic block that ends with the barrier. Thus, we can't trust the
{
rtx_insn *x;
if (jump_to_next
- || next_real_insn (JUMP_LABEL (prev)) == next
+ || next_real_insn (JUMP_LABEL_AS_INSN (prev)) == next
/* If relax_delay_slots() decides NEXT was redundant
with some previous instruction, it will have
redirected PREV's jump to the following insn. */
}
}
- return align_jumps_log;
+ return align_jumps.levels[0].log;
}
/* If we are inside a phony loop, almost any kind of label can turn up as the
|| recog_memoized (next) == CODE_FOR_consttable_2)
return 0;
- return align_loops_log;
+ return align_loops.levels[0].log;
}
/* Do a final pass over the function, just before delayed branch
/* Return the UID of the insn that follows the specified label. */
int
-get_dest_uid (rtx label, int max_uid)
+get_dest_uid (rtx_insn *label, int max_uid)
{
rtx_insn *dest = next_real_insn (label);
if (get_attr_length (insn) > 4)
{
rtx src = SET_SRC (PATTERN (insn));
- rtx olabel = XEXP (XEXP (src, 1), 0);
+ rtx_insn *olabel = safe_as_a <rtx_insn *> (XEXP (XEXP (src, 1), 0));
int addr = INSN_ADDRESSES (INSN_UID (insn));
rtx_insn *label = 0;
int dest_uid = get_dest_uid (olabel, max_uid);
to handle this case, so just die when we see it. */
if (epilogue_p < 0
|| current_function_interrupt
- || ! call_really_used_regs[temp] || fixed_regs[temp])
+ || ! call_used_regs[temp] || fixed_regs[temp])
temp = -1;
if (temp < 0 && ! current_function_interrupt && epilogue_p >= 0)
{
- HARD_REG_SET temps;
- COPY_HARD_REG_SET (temps, call_used_reg_set);
- AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
+ HARD_REG_SET temps = (regs_invalidated_by_call
+ & ~fixed_reg_set
+ & savable_regs);
if (epilogue_p > 0)
{
int nreg = 0;
{
HARD_REG_SET temps;
- COPY_HARD_REG_SET (temps, *live_regs_mask);
+ temps = *live_regs_mask;
CLEAR_HARD_REG_BIT (temps, REGNO (reg));
temp = scavenge_reg (&temps);
}
if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
&& hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
{
- HARD_REG_SET unsaved;
-
push (FPSCR_REG);
- COMPL_HARD_REG_SET (unsaved, *mask);
- fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
+ fpscr_set_from_mem (NORMAL_MODE (FP_MODE), ~*mask);
skip_fpscr = true;
}
if (i != PR_REG
else if (TARGET_FPU_DOUBLE && TARGET_FMOVD && TARGET_FPU_SINGLE)
for (int count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
- && (! call_really_used_regs[reg]
+ && (! call_used_regs[reg]
|| interrupt_handler)
&& ++count > 2)
{
: interrupt_handler
? (/* Need to save all the regs ever live. */
(df_regs_ever_live_p (reg)
- || (call_really_used_regs[reg]
+ || (call_used_regs[reg]
&& (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
|| reg == PIC_OFFSET_TABLE_REGNUM)
&& has_call))
: (/* Only push those regs which are used and need to be saved. */
(false)
|| (df_regs_ever_live_p (reg)
- && ((!call_really_used_regs[reg]
+ && ((!call_used_regs[reg]
&& !(reg != PIC_OFFSET_TABLE_REGNUM
- && fixed_regs[reg] && call_used_regs[reg]))
+ && fixed_regs[reg]
+ && call_used_or_fixed_reg_p (reg)))
|| (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
|| (crtl->calls_eh_return
&& (reg == EH_RETURN_DATA_REGNO (0)
if (!TARGET_FPU_ANY)
{
- error ("__builtin_saveregs not supported by this subtarget");
+ error ("%<__builtin_saveregs%> not supported by this subtarget");
return const0_rtx;
}
tree addr, lab_over = NULL, result = NULL;
tree eff_type;
- const bool pass_by_ref =
- !VOID_TYPE_P (type)
- && targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
+ const bool pass_by_ref
+ = !VOID_TYPE_P (type) && must_pass_va_arg_in_stack (type);
if (pass_by_ref)
type = build_pointer_type (type);
}
static bool
-sh_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+sh_pass_by_reference (cumulative_args_t cum_v, const function_arg_info &arg)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
- if (targetm.calls.must_pass_in_stack (mode, type))
+ if (targetm.calls.must_pass_in_stack (arg))
return true;
/* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
}
static bool
-sh_callee_copies (cumulative_args_t cum, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+sh_callee_copies (cumulative_args_t cum, const function_arg_info &arg)
{
/* ??? How can it possibly be correct to return true only on the
caller side of the equation? Is there someplace else in the
sh backend that's magically producing the copies? */
return (get_cumulative_args (cum)->outgoing
- && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
+ && ((arg.mode == BLKmode
+ ? TYPE_ALIGN (arg.type)
+ : GET_MODE_ALIGNMENT (arg.mode))
% SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
}
}
static int
-sh_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
- tree type, bool named ATTRIBUTE_UNUSED)
+sh_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
int words = 0;
- if (sh_pass_in_reg_p (*cum, mode, type)
+ if (sh_pass_in_reg_p (*cum, arg.mode, arg.type)
&& !TARGET_FPU_DOUBLE
- && (sh_round_reg (*cum, mode)
- + (mode != BLKmode
- ? CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD)
- : CEIL (int_size_in_bytes (type), UNITS_PER_WORD))
- > NPARM_REGS (mode)))
- words = NPARM_REGS (mode) - sh_round_reg (*cum, mode);
+ && (sh_round_reg (*cum, arg.mode)
+ + CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD)
+ > NPARM_REGS (arg.mode)))
+ words = NPARM_REGS (arg.mode) - sh_round_reg (*cum, arg.mode);
return words * UNITS_PER_WORD;
}
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
- MODE is the argument's machine mode.
- TYPE is the data type of the argument (as a tree).
- This is null for libcalls where that information may
- not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
the preceding args and about the function being called.
- NAMED is nonzero if this argument is a named parameter
- (otherwise it is an extra parameter matching an ellipsis).
+ ARG is a description of the argument.
On SH the first args are normally in registers
and the rest are pushed. Any arg that starts within the first
NPARM_REGS words is at least partially passed in a register unless
its data type forbids. */
static rtx
-sh_function_arg (cumulative_args_t ca_v, machine_mode mode,
- const_tree type, bool named)
+sh_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
{
CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+ machine_mode mode = arg.mode;
- if (mode == VOIDmode)
+ if (arg.end_marker_p ())
return ca->renesas_abi ? const1_rtx : const0_rtx;
- if (sh_pass_in_reg_p (*ca, mode, type)
- && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
+ if (sh_pass_in_reg_p (*ca, mode, arg.type)
+ && (arg.named || ! (TARGET_HITACHI || ca->renesas_abi)))
{
int regno;
return NULL_RTX;
}
-/* Update the data in CUM to advance over an argument
- of mode MODE and data type TYPE.
- (TYPE is null for libcalls where that information may not be
- available.) */
+/* Update the data in CUM to advance over argument ARG. */
static void
-sh_function_arg_advance (cumulative_args_t ca_v, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+sh_function_arg_advance (cumulative_args_t ca_v,
+ const function_arg_info &arg)
{
CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
{
/* Note that we've used the skipped register. */
- if (mode == SFmode && ca->free_single_fp_reg)
+ if (arg.mode == SFmode && ca->free_single_fp_reg)
{
ca->free_single_fp_reg = 0;
return;
skipped in order to align the DF value. We note this skipped
register, because the next SF value will use it, and not the
SF that follows the DF. */
- if (mode == DFmode
+ if (arg.mode == DFmode
&& sh_round_reg (*ca, DFmode) != sh_round_reg (*ca, SFmode))
{
ca->free_single_fp_reg = (sh_round_reg (*ca, SFmode)
- + BASE_ARG_REG (mode));
+ + BASE_ARG_REG (arg.mode));
}
}
if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
- || sh_pass_in_reg_p (*ca, mode, type))
- (ca->arg_count[(int) get_sh_arg_class (mode)]
- = (sh_round_reg (*ca, mode)
- + (mode == BLKmode
- ? CEIL (int_size_in_bytes (type), UNITS_PER_WORD)
- : CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD))));
+ || sh_pass_in_reg_p (*ca, arg.mode, arg.type))
+ (ca->arg_count[(int) get_sh_arg_class (arg.mode)]
+ = (sh_round_reg (*ca, arg.mode)
+ + CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD)));
}
/* The Renesas calling convention doesn't quite fit into this scheme since
function that tell if a function uses varargs or stdarg. */
static void
sh_setup_incoming_varargs (cumulative_args_t ca,
- machine_mode mode,
- tree type,
+ const function_arg_info &arg,
int *pretend_arg_size,
int second_time ATTRIBUTE_UNUSED)
{
{
int named_parm_regs, anon_parm_regs;
- named_parm_regs = (sh_round_reg (*get_cumulative_args (ca), mode)
- + (mode == BLKmode
- ? CEIL (int_size_in_bytes (type), UNITS_PER_WORD)
- : CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD)));
+ named_parm_regs = (sh_round_reg (*get_cumulative_args (ca), arg.mode)
+ + CEIL (arg.promoted_size_in_bytes (),
+ UNITS_PER_WORD));
anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
if (anon_parm_regs > 0)
*pretend_arg_size = anon_parm_regs * 4;
char* dash = strchr (str, '-');
if (!dash)
{
- warning (0, "value of -mfixed-range must have form REG1-REG2");
+ warning (0, "value of %<-mfixed-range%> must have form REG1-REG2");
return;
}
*dash = '\0';
}
for (int i = first; i <= last; ++i)
- fixed_regs[i] = call_used_regs[i] = 1;
+ fixed_regs[i] = 1;
if (!comma)
break;
return false;
for (tree list = SH_ATTRIBUTES (func); list; list = TREE_CHAIN (list))
- if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
+ if (is_attribute_p ("function_vector", get_attribute_name (list)))
return true;
return false;
if (set == NULL && reg_overlap_mentioned_p (reg, PATTERN (insn)))
return false;
- if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
+ if (code == CALL_INSN && call_used_regs[REGNO (reg)])
return true;
}
return true;
/* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
if DEP_INSN is anti-flow dependent on INSN. */
static bool
-flow_dependent_p (rtx insn, rtx dep_insn)
+flow_dependent_p (rtx_insn *insn, rtx_insn *dep_insn)
{
rtx tmp = PATTERN (insn);
- note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
+ note_stores (dep_insn, flow_dependent_p_1, &tmp);
return tmp == NULL_RTX;
}
machine_mode mode ATTRIBUTE_UNUSED, int ignore)
{
tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl);
const struct builtin_description *d = &bdesc[fcode];
enum insn_code icode = d->icode;
int signature = d->signature;
&& ((regno - FIRST_FP_REG) & 1) == 0)))
return mode;
- return choose_hard_reg_mode (regno, nregs, false);
+ return choose_hard_reg_mode (regno, nregs, NULL);
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
+ const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
CUMULATIVE_ARGS cum;
int structure_value_byref = 0;
rtx this_rtx, this_value, sibcall, funexp;
{
tree ptype = build_pointer_type (TREE_TYPE (funtype));
- sh_function_arg_advance (pack_cumulative_args (&cum), Pmode, ptype, true);
+ function_arg_info ptr_arg (ptype, Pmode, /*named=*/true);
+ sh_function_arg_advance (pack_cumulative_args (&cum), ptr_arg);
}
- this_rtx
- = sh_function_arg (pack_cumulative_args (&cum), Pmode, ptr_type_node, true);
+ function_arg_info ptr_arg (ptr_type_node, Pmode, /*named=*/true);
+ this_rtx = sh_function_arg (pack_cumulative_args (&cum), ptr_arg);
/* For SHcompact, we only have r0 for a scratch register: r1 is the
static chain pointer (even if you can't have nested virtual functions
registers are used for argument passing, are callee-saved, or reserved. */
/* We need to check call_used_regs / fixed_regs in case -fcall_saved-reg /
-ffixed-reg has been used. */
- if (! call_used_regs[0] || fixed_regs[0])
+ if (! call_used_or_fixed_reg_p (0) || fixed_regs[0])
error ("r0 needs to be available as a call-clobbered register");
scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
{
- if (call_used_regs[1] && ! fixed_regs[1])
+ if (call_used_or_fixed_reg_p (1) && ! fixed_regs[1])
scratch1 = gen_rtx_REG (ptr_mode, 1);
/* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
pointing where to return struct values. */
- if (call_used_regs[3] && ! fixed_regs[3])
+ if (call_used_or_fixed_reg_p (3) && ! fixed_regs[3])
scratch2 = gen_rtx_REG (Pmode, 3);
}
emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
offset_addr = scratch0;
}
- else if (scratch0 != scratch1)
- {
- emit_move_insn (scratch1, GEN_INT (vcall_offset));
- emit_insn (gen_add2_insn (scratch0, scratch1));
- offset_addr = scratch0;
- }
else
gcc_unreachable (); /* FIXME */
emit_load_ptr (scratch0, offset_addr);
emit_barrier ();
/* Run just enough of rest_of_compilation to do scheduling and get
- the insns emitted. Note that use_thunk calls
- assemble_start_function and assemble_end_function. */
+ the insns emitted. */
insns = get_insns ();
sh_reorg ();
shorten_branches (insns);
+ assemble_start_function (thunk_fndecl, fnname);
final_start_function (insns, file, 1);
final (insns, file, 1);
final_end_function ();
+ assemble_end_function (thunk_fndecl, fnname);
reload_completed = 0;
epilogue_completed = 0;
return true;
}
-/* Return true if DISP can be legitimized. */
+/* Implement TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT. */
static bool
-sh_legitimize_address_displacement (rtx *disp, rtx *offs,
+sh_legitimize_address_displacement (rtx *offset1, rtx *offset2,
+ poly_int64 orig_offset,
machine_mode mode)
{
if ((TARGET_FPU_DOUBLE && mode == DFmode)
|| (TARGET_SH2E && mode == SFmode))
return false;
- struct disp_adjust adj = sh_find_mov_disp_adjust (mode, INTVAL (*disp));
+ struct disp_adjust adj = sh_find_mov_disp_adjust (mode, orig_offset);
if (adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
{
- *disp = adj.mov_disp;
- *offs = adj.offset_adjust;
+ *offset1 = adj.offset_adjust;
+ *offset2 = adj.mov_disp;
return true;
}
{
for (int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno ++)
if (! VALID_REGISTER_P (regno))
- fixed_regs[regno] = call_used_regs[regno] = 1;
+ fixed_regs[regno] = 1;
/* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs. */
if (flag_pic)
- {
- fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
- call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
- }
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
if (TARGET_FDPIC)
{
fixed_regs[PIC_REG] = 1;
call_used_regs[PIC_REG] = 1;
- call_really_used_regs[PIC_REG] = 1;
}
/* Renesas saves and restores mac registers on call. */
if (TARGET_HITACHI && ! TARGET_NOMACSAVE)
{
- call_really_used_regs[MACH_REG] = 0;
- call_really_used_regs[MACL_REG] = 0;
+ call_used_regs[MACH_REG] = 0;
+ call_used_regs[MACL_REG] = 0;
}
for (int regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++)
- if (! fixed_regs[regno] && call_really_used_regs[regno])
+ if (! fixed_regs[regno] && call_used_regs[regno])
SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);
- call_really_used_regs[FPSCR_MODES_REG] = 0;
- call_really_used_regs[FPSCR_STAT_REG] = 0;
+ call_used_regs[FPSCR_MODES_REG] = 0;
+ call_used_regs[FPSCR_STAT_REG] = 0;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P
{
if (CALL_P (DF_REF_INSN (d)))
{
- if (REGNO_REG_SET_P (regs_invalidated_by_call_regset, GBR_REG))
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, GBR_REG))
return NULL_RTX;
else
continue;
else
{
- set_of_reg op_set = sh_find_set_of_reg (ops[i], insn,
- prev_nonnote_insn_bb);
+ set_of_reg op_set = sh_find_set_of_reg
+ (ops[i], insn, prev_nonnote_nondebug_insn_bb);
if (op_set.set_src == NULL_RTX)
continue;
if (GET_MODE (extended_op) != SImode)
return NULL_RTX;
- set_of_reg s = sh_find_set_of_reg (extended_op, insn, prev_nonnote_insn_bb);
+ set_of_reg s = sh_find_set_of_reg (extended_op, insn,
+ prev_nonnote_nondebug_insn_bb);
if (s.set_src == NULL_RTX)
return NULL_RTX;
if (!can_create_pseudo_p ())
return false;
- set_of_reg t_before_negc = sh_find_set_of_reg (get_t_reg_rtx (), curr_insn,
- prev_nonnote_insn_bb);
- set_of_reg t_after_negc = sh_find_set_of_reg (get_t_reg_rtx (), curr_insn,
- next_nonnote_insn_bb);
+ set_of_reg t_before_negc = sh_find_set_of_reg
+ (get_t_reg_rtx (), curr_insn, prev_nonnote_nondebug_insn_bb);
+ set_of_reg t_after_negc = sh_find_set_of_reg
+ (get_t_reg_rtx (), curr_insn, next_nonnote_nondebug_insn_bb);
if (t_before_negc.set_rtx != NULL_RTX && t_after_negc.set_rtx != NULL_RTX
&& rtx_equal_p (t_before_negc.set_rtx, t_after_negc.set_rtx)
Also try to look through the first extension that we hit. There are some
cases, where a zero_extend is followed an (implicit) sign_extend, and it
fails to see the sign_extend. */
- sh_extending_set_of_reg result =
- sh_find_set_of_reg (reg, curr_insn, prev_nonnote_insn_bb, true);
+ sh_extending_set_of_reg result = sh_find_set_of_reg
+ (reg, curr_insn, prev_nonnote_nondebug_insn_bb, true);
if (result.set_src != NULL)
{
rtx r = gen_reg_rtx (SImode);
rtx_insn* i0;
if (from_mode == QImode)
- i0 = emit_insn_after (gen_extendqisi2 (r, set_src), insn);
+ i0 = sh_check_add_incdec_notes (
+ emit_insn_after (gen_extendqisi2 (r, set_src), insn));
else if (from_mode == HImode)
- i0 = emit_insn_after (gen_extendhisi2 (r, set_src), insn);
+ i0 = sh_check_add_incdec_notes (
+ emit_insn_after (gen_extendhisi2 (r, set_src), insn));
else
gcc_unreachable ();
sh_emit_mode_set (int entity ATTRIBUTE_UNUSED, int mode,
int prev_mode, HARD_REG_SET regs_live ATTRIBUTE_UNUSED)
{
- if ((TARGET_SH4A_FP || TARGET_SH4_300)
+ if ((TARGET_SH4A_FP || TARGET_FPU_SH4_300)
&& prev_mode != FP_MODE_NONE && prev_mode != mode)
{
emit_insn (gen_toggle_pr ());