/* Output routines for GCC for Renesas / SuperH SH.
- Copyright (C) 1993-2018 Free Software Foundation, Inc.
+ Copyright (C) 1993-2021 Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
Improved by Jim Wilson (wilson@cygnus.com).
HOST_WIDE_INT, tree);
static void sh_file_start (void);
static bool sh_assemble_integer (rtx, unsigned int, int);
-static bool flow_dependent_p (rtx, rtx);
+static bool flow_dependent_p (rtx_insn *, rtx_insn *);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int and_xor_ior_costs (rtx, int);
static rtx sh_libcall_value (machine_mode, const_rtx);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
-static void sh_setup_incoming_varargs (cumulative_args_t, machine_mode,
- tree, int *, int);
+static void sh_setup_incoming_varargs (cumulative_args_t,
+ const function_arg_info &, int *, int);
static bool sh_strict_argument_naming (cumulative_args_t);
static bool sh_pretend_outgoing_varargs_named (cumulative_args_t);
static void sh_atomic_assign_expand_fenv (tree *, tree *, tree *);
int *punsignedp,
const_tree funtype,
int for_return);
-static bool sh_pass_by_reference (cumulative_args_t, machine_mode,
- const_tree, bool);
-static bool sh_callee_copies (cumulative_args_t, machine_mode,
- const_tree, bool);
-static int sh_arg_partial_bytes (cumulative_args_t, machine_mode,
- tree, bool);
-static void sh_function_arg_advance (cumulative_args_t, machine_mode,
- const_tree, bool);
-static rtx sh_function_arg (cumulative_args_t, machine_mode,
- const_tree, bool);
+static bool sh_pass_by_reference (cumulative_args_t,
+ const function_arg_info &);
+static bool sh_callee_copies (cumulative_args_t, const function_arg_info &);
+static int sh_arg_partial_bytes (cumulative_args_t, const function_arg_info &);
+static void sh_function_arg_advance (cumulative_args_t,
+ const function_arg_info &);
+static rtx sh_function_arg (cumulative_args_t, const function_arg_info &);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static bool sh2a_function_vector_p (tree);
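Several of the hooks declared above now take a single const function_arg_info & descriptor in place of the separate machine_mode / tree / bool parameters. As a rough illustration of the mapping (the helper below is hypothetical and not part of the patch; it only mirrors accesses the patch itself makes):

/* Hypothetical helper, only to show how the old parameters map onto
   the new descriptor:
     arg.mode   -- was the MODE parameter
     arg.type   -- was the TYPE parameter (NULL_TREE for libcalls)
     arg.named  -- was the NAMED parameter
     arg.end_marker_p ()  -- was the mode == VOIDmode end-of-args check.  */
static bool
example_named_nonblk_arg_p (const function_arg_info &arg)
{
  if (arg.end_marker_p ())
    return false;
  return arg.named && arg.mode != BLKmode;
}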
#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
+#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
+#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
{
if (tokens[i] == "strict")
ret.strict = true;
- else if (tokens[i].find ("gbr-offset=") == 0)
+ else if (!tokens[i].compare (0, strlen ("gbr-offset="), "gbr-offset="))
{
std::string offset_str = tokens[i].substr (strlen ("gbr-offset="));
ret.tcb_gbr_offset = integral_argument (offset_str.c_str ());
/* Run sh_treg_combine pass after register allocation and basic block
reordering as this sometimes creates new opportunities. */
register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine3"),
- PASS_POS_INSERT_AFTER, "split4", 1);
+ PASS_POS_INSERT_AFTER, "split3", 1);
/* Optimize sett and clrt insns, by e.g. removing them if the T bit value
is known after a conditional branch.
to the pressure on R0. */
/* Enable sched1 for SH4 if the user explicitly requests.
When sched1 is enabled, the ready queue will be reordered by
- the target hooks if pressure is high. We can not do this for
+ the target hooks if pressure is high. We cannot do this for
PIC, SH3 and lower as they give spill failures for R0. */
if (!TARGET_HARD_SH4 || flag_pic)
flag_schedule_insns = 0;
else if (flag_exceptions)
{
if (flag_schedule_insns && global_options_set.x_flag_schedule_insns)
- warning (0, "ignoring -fschedule-insns because of exception "
+ warning (0, "ignoring %<-fschedule-insns%> because of exception "
"handling bug");
flag_schedule_insns = 0;
}
&& flag_omit_frame_pointer && !TARGET_ACCUMULATE_OUTGOING_ARGS)
{
warning (0, "unwind tables currently require either a frame pointer "
- "or -maccumulate-outgoing-args for correctness");
+ "or %<-maccumulate-outgoing-args%> for correctness");
TARGET_ACCUMULATE_OUTGOING_ARGS = 1;
}
if (flag_unsafe_math_optimizations)
{
/* Enable fsca insn for SH4A if not otherwise specified by the user. */
- if (global_options_set.x_TARGET_FSCA == 0 && TARGET_SH4A_FP)
+ if (global_options_set.x_TARGET_FSCA == 0
+ && (TARGET_SH4A_FP || TARGET_FPU_SH4_300))
TARGET_FSCA = 1;
/* Enable fsrra insn for SH4A if not otherwise specified by the user. */
- if (global_options_set.x_TARGET_FSRRA == 0 && TARGET_SH4A_FP)
+ if (global_options_set.x_TARGET_FSRRA == 0
+ && (TARGET_SH4A_FP || TARGET_FPU_SH4_300))
TARGET_FSRRA = 1;
}
parse_alignment_opts ();
if (flag_align_jumps && !str_align_jumps)
str_align_jumps = "2";
- else if (align_jumps_value < 2)
+ else if (align_jumps.levels[0].get_value () < 2)
str_align_jumps = "2";
if (flag_align_functions && !str_align_functions)
{
/* Parse values so that we can compare for current value. */
parse_alignment_opts ();
- int min_align = MAX (align_loops_value, align_jumps_value);
+ int min_align = MAX (align_loops.levels[0].get_value (),
+ align_jumps.levels[0].get_value ());
/* Also take possible .long constants / mova tables into account. */
if (min_align < 4)
min_align = 4;
- if (align_functions_value < min_align)
+ if (align_functions.levels[0].get_value () < min_align)
{
char *r = XNEWVEC (char, 16);
sprintf (r, "%d", min_align);
lsw_taken = EQ;
if (prob.initialized_p ())
{
- /* FIXME: This is not optimal. We do not really know the probablity
+ /* FIXME: This is not optimal. We do not really know the probability
that values differ by MCW only, but we should probably distribute
probabilities more evenly. */
msw_skip_prob = rev_prob;
&& CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
{
if (optimize)
- new_align = 1 << label_to_alignment (from);
+ new_align = 1 << label_to_alignment (from).levels[0].log;
else if (BARRIER_P (prev_nonnote_insn (from)))
new_align = 1 << barrier_align (from);
else
&& (prev_nonnote_insn (from)
== XEXP (MOVA_LABELREF (mova), 0))))
num_mova--;
- if (barrier_align (next_real_insn (from)) == align_jumps_log)
+ if (barrier_align (next_real_insn (from)) == align_jumps.levels[0].log)
{
/* We have just passed the barrier in front of the
ADDR_DIFF_VEC, which is stored in found_barrier. Since
return ((optimize_size
|| ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
<= (unsigned) 1 << (CACHE_LOG - 2)))
- ? 1 : align_jumps_log);
+ ? 1 : align_jumps.levels[0].log);
}
rtx_insn *next = next_active_insn (barrier_or_label);
return 0;
if (! TARGET_SH2 || ! optimize)
- return align_jumps_log;
+ return align_jumps.levels[0].log;
/* When fixing up pcloads, a constant table might be inserted just before
the basic block that ends with the barrier. Thus, we can't trust the
}
}
- return align_jumps_log;
+ return align_jumps.levels[0].log;
}
/* If we are inside a phony loop, almost any kind of label can turn up as the
|| recog_memoized (next) == CODE_FOR_consttable_2)
return 0;
- return align_loops_log;
+ return align_loops.levels[0].log;
}
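The hunks above also switch from the retired align_jumps_value / align_jumps_log style globals to the per-option alignment structures. A minimal sketch of the accessor correspondence (the function name is hypothetical; only the accessors themselves appear in the patch):

/* Hypothetical illustration:
     align_jumps.levels[0].get_value ()  -- was align_jumps_value
     align_jumps.levels[0].log           -- was align_jumps_log
   (likewise for align_loops and align_functions).  */
static int
example_jump_align_log (void)
{
  /* Mirrors the "< 2" check in the option-override code above:
     request at least a 2-byte boundary.  */
  if (align_jumps.levels[0].get_value () < 2)
    return 1;
  return align_jumps.levels[0].log;
}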
/* Do a final pass over the function, just before delayed branch
to handle this case, so just die when we see it. */
if (epilogue_p < 0
|| current_function_interrupt
- || ! call_really_used_regs[temp] || fixed_regs[temp])
+ || ! call_used_regs[temp] || fixed_regs[temp])
temp = -1;
if (temp < 0 && ! current_function_interrupt && epilogue_p >= 0)
{
- HARD_REG_SET temps;
- COPY_HARD_REG_SET (temps, call_used_reg_set);
- AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
+ HARD_REG_SET temps = (regs_invalidated_by_call
+ & ~fixed_reg_set
+ & savable_regs);
if (epilogue_p > 0)
{
int nreg = 0;
{
HARD_REG_SET temps;
- COPY_HARD_REG_SET (temps, *live_regs_mask);
+ temps = *live_regs_mask;
CLEAR_HARD_REG_BIT (temps, REGNO (reg));
temp = scavenge_reg (&temps);
}
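The two replacements above also show the new HARD_REG_SET style: the COPY_HARD_REG_SET / AND_COMPL_HARD_REG_SET / COMPL_HARD_REG_SET macro calls become plain assignment and the & and ~ operators. A small hypothetical sketch of the correspondence, using only the operations visible in this patch:

/* Hypothetical illustration:
     dst = src;            -- was COPY_HARD_REG_SET (dst, src)
     dst = src & ~other;   -- was COPY_ + AND_COMPL_HARD_REG_SET (dst, other)
     ~mask                 -- was COMPL_HARD_REG_SET (tmp, mask)  */
static HARD_REG_SET
example_scavengeable_regs (const HARD_REG_SET &live)
{
  HARD_REG_SET temps = regs_invalidated_by_call & ~fixed_reg_set;
  temps = temps & ~live;	/* drop anything currently live */
  return temps;
}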
if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
&& hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
{
- HARD_REG_SET unsaved;
-
push (FPSCR_REG);
- COMPL_HARD_REG_SET (unsaved, *mask);
- fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
+ fpscr_set_from_mem (NORMAL_MODE (FP_MODE), ~*mask);
skip_fpscr = true;
}
if (i != PR_REG
else if (TARGET_FPU_DOUBLE && TARGET_FMOVD && TARGET_FPU_SINGLE)
for (int count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
- && (! call_really_used_regs[reg]
+ && (! call_used_regs[reg]
|| interrupt_handler)
&& ++count > 2)
{
: interrupt_handler
? (/* Need to save all the regs ever live. */
(df_regs_ever_live_p (reg)
- || (call_really_used_regs[reg]
+ || (call_used_regs[reg]
&& (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
|| reg == PIC_OFFSET_TABLE_REGNUM)
&& has_call))
: (/* Only push those regs which are used and need to be saved. */
(false)
|| (df_regs_ever_live_p (reg)
- && ((!call_really_used_regs[reg]
+ && ((!call_used_regs[reg]
&& !(reg != PIC_OFFSET_TABLE_REGNUM
- && fixed_regs[reg] && call_used_regs[reg]))
+ && fixed_regs[reg]
+ && call_used_or_fixed_reg_p (reg)))
|| (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
|| (crtl->calls_eh_return
&& (reg == EH_RETURN_DATA_REGNO (0)
if (!TARGET_FPU_ANY)
{
- error ("__builtin_saveregs not supported by this subtarget");
+ error ("%<__builtin_saveregs%> not supported by this subtarget");
return const0_rtx;
}
tree addr, lab_over = NULL, result = NULL;
tree eff_type;
- const bool pass_by_ref =
- !VOID_TYPE_P (type)
- && targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
+ const bool pass_by_ref
+ = !VOID_TYPE_P (type) && must_pass_va_arg_in_stack (type);
if (pass_by_ref)
type = build_pointer_type (type);
}
static bool
-sh_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+sh_pass_by_reference (cumulative_args_t cum_v, const function_arg_info &arg)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
- if (targetm.calls.must_pass_in_stack (mode, type))
+ if (targetm.calls.must_pass_in_stack (arg))
return true;
/* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
}
static bool
-sh_callee_copies (cumulative_args_t cum, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+sh_callee_copies (cumulative_args_t cum, const function_arg_info &arg)
{
/* ??? How can it possibly be correct to return true only on the
caller side of the equation? Is there someplace else in the
sh backend that's magically producing the copies? */
return (get_cumulative_args (cum)->outgoing
- && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
+ && ((arg.mode == BLKmode
+ ? TYPE_ALIGN (arg.type)
+ : GET_MODE_ALIGNMENT (arg.mode))
% SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
}
}
static int
-sh_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
- tree type, bool named ATTRIBUTE_UNUSED)
+sh_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
int words = 0;
- if (sh_pass_in_reg_p (*cum, mode, type)
+ if (sh_pass_in_reg_p (*cum, arg.mode, arg.type)
&& !TARGET_FPU_DOUBLE
- && (sh_round_reg (*cum, mode)
- + (mode != BLKmode
- ? CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD)
- : CEIL (int_size_in_bytes (type), UNITS_PER_WORD))
- > NPARM_REGS (mode)))
- words = NPARM_REGS (mode) - sh_round_reg (*cum, mode);
+ && (sh_round_reg (*cum, arg.mode)
+ + CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD)
+ > NPARM_REGS (arg.mode)))
+ words = NPARM_REGS (arg.mode) - sh_round_reg (*cum, arg.mode);
return words * UNITS_PER_WORD;
}
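Here and in sh_function_arg_advance / sh_setup_incoming_varargs below, the open-coded BLKmode size selection is folded into arg.promoted_size_in_bytes (). Judging purely from the lines being removed, the expression it stands in for is roughly the following (illustrative sketch with a hypothetical function name, not the library definition):

/* Roughly the expression the patch removes: BLKmode arguments use the
   type size, other arguments the size of their mode, rounded up to
   whole words.  The patch writes this as
   CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD).  */
static int
example_arg_size_in_words (const function_arg_info &arg)
{
  return (arg.mode != BLKmode
	  ? CEIL (GET_MODE_SIZE (arg.mode), UNITS_PER_WORD)
	  : CEIL (int_size_in_bytes (arg.type), UNITS_PER_WORD));
}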
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
- MODE is the argument's machine mode.
- TYPE is the data type of the argument (as a tree).
- This is null for libcalls where that information may
- not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
the preceding args and about the function being called.
- NAMED is nonzero if this argument is a named parameter
- (otherwise it is an extra parameter matching an ellipsis).
+ ARG is a description of the argument.
On SH the first args are normally in registers
and the rest are pushed. Any arg that starts within the first
NPARM_REGS words is at least partially passed in a register unless
its data type forbids. */
static rtx
-sh_function_arg (cumulative_args_t ca_v, machine_mode mode,
- const_tree type, bool named)
+sh_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
{
CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+ machine_mode mode = arg.mode;
- if (mode == VOIDmode)
+ if (arg.end_marker_p ())
return ca->renesas_abi ? const1_rtx : const0_rtx;
- if (sh_pass_in_reg_p (*ca, mode, type)
- && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
+ if (sh_pass_in_reg_p (*ca, mode, arg.type)
+ && (arg.named || ! (TARGET_HITACHI || ca->renesas_abi)))
{
int regno;
return NULL_RTX;
}
-/* Update the data in CUM to advance over an argument
- of mode MODE and data type TYPE.
- (TYPE is null for libcalls where that information may not be
- available.) */
+/* Update the data in CUM to advance over argument ARG. */
static void
-sh_function_arg_advance (cumulative_args_t ca_v, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+sh_function_arg_advance (cumulative_args_t ca_v,
+ const function_arg_info &arg)
{
CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
{
/* Note that we've used the skipped register. */
- if (mode == SFmode && ca->free_single_fp_reg)
+ if (arg.mode == SFmode && ca->free_single_fp_reg)
{
ca->free_single_fp_reg = 0;
return;
skipped in order to align the DF value. We note this skipped
register, because the next SF value will use it, and not the
SF that follows the DF. */
- if (mode == DFmode
+ if (arg.mode == DFmode
&& sh_round_reg (*ca, DFmode) != sh_round_reg (*ca, SFmode))
{
ca->free_single_fp_reg = (sh_round_reg (*ca, SFmode)
- + BASE_ARG_REG (mode));
+ + BASE_ARG_REG (arg.mode));
}
}
if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
- || sh_pass_in_reg_p (*ca, mode, type))
- (ca->arg_count[(int) get_sh_arg_class (mode)]
- = (sh_round_reg (*ca, mode)
- + (mode == BLKmode
- ? CEIL (int_size_in_bytes (type), UNITS_PER_WORD)
- : CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD))));
+ || sh_pass_in_reg_p (*ca, arg.mode, arg.type))
+ (ca->arg_count[(int) get_sh_arg_class (arg.mode)]
+ = (sh_round_reg (*ca, arg.mode)
+ + CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD)));
}
/* The Renesas calling convention doesn't quite fit into this scheme since
function that tell if a function uses varargs or stdarg. */
static void
sh_setup_incoming_varargs (cumulative_args_t ca,
- machine_mode mode,
- tree type,
+ const function_arg_info &arg,
int *pretend_arg_size,
int second_time ATTRIBUTE_UNUSED)
{
{
int named_parm_regs, anon_parm_regs;
- named_parm_regs = (sh_round_reg (*get_cumulative_args (ca), mode)
- + (mode == BLKmode
- ? CEIL (int_size_in_bytes (type), UNITS_PER_WORD)
- : CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD)));
+ named_parm_regs = (sh_round_reg (*get_cumulative_args (ca), arg.mode)
+ + CEIL (arg.promoted_size_in_bytes (),
+ UNITS_PER_WORD));
anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
if (anon_parm_regs > 0)
*pretend_arg_size = anon_parm_regs * 4;
char* dash = strchr (str, '-');
if (!dash)
{
- warning (0, "value of -mfixed-range must have form REG1-REG2");
+ warning (0, "value of %<-mfixed-range%> must have form REG1-REG2");
return;
}
*dash = '\0';
}
for (int i = first; i <= last; ++i)
- fixed_regs[i] = call_used_regs[i] = 1;
+ fixed_regs[i] = 1;
if (!comma)
break;
return false;
for (tree list = SH_ATTRIBUTES (func); list; list = TREE_CHAIN (list))
- if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
+ if (is_attribute_p ("function_vector", get_attribute_name (list)))
return true;
return false;
if (set == NULL && reg_overlap_mentioned_p (reg, PATTERN (insn)))
return false;
- if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
+ if (code == CALL_INSN && call_used_regs[REGNO (reg)])
return true;
}
return true;
/* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
if DEP_INSN is anti-flow dependent on INSN. */
static bool
-flow_dependent_p (rtx insn, rtx dep_insn)
+flow_dependent_p (rtx_insn *insn, rtx_insn *dep_insn)
{
rtx tmp = PATTERN (insn);
- note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
+ note_stores (dep_insn, flow_dependent_p_1, &tmp);
return tmp == NULL_RTX;
}
machine_mode mode ATTRIBUTE_UNUSED, int ignore)
{
tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl);
const struct builtin_description *d = &bdesc[fcode];
enum insn_code icode = d->icode;
int signature = d->signature;
&& ((regno - FIRST_FP_REG) & 1) == 0)))
return mode;
- return choose_hard_reg_mode (regno, nregs, false);
+ return choose_hard_reg_mode (regno, nregs, NULL);
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
+ const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
CUMULATIVE_ARGS cum;
int structure_value_byref = 0;
rtx this_rtx, this_value, sibcall, funexp;
{
tree ptype = build_pointer_type (TREE_TYPE (funtype));
- sh_function_arg_advance (pack_cumulative_args (&cum), Pmode, ptype, true);
+ function_arg_info ptr_arg (ptype, Pmode, /*named=*/true);
+ sh_function_arg_advance (pack_cumulative_args (&cum), ptr_arg);
}
- this_rtx
- = sh_function_arg (pack_cumulative_args (&cum), Pmode, ptr_type_node, true);
+ function_arg_info ptr_arg (ptr_type_node, Pmode, /*named=*/true);
+ this_rtx = sh_function_arg (pack_cumulative_args (&cum), ptr_arg);
/* For SHcompact, we only have r0 for a scratch register: r1 is the
static chain pointer (even if you can't have nested virtual functions
registers are used for argument passing, are callee-saved, or reserved. */
/* We need to check call_used_regs / fixed_regs in case -fcall_saved-reg /
-ffixed-reg has been used. */
- if (! call_used_regs[0] || fixed_regs[0])
+ if (! call_used_or_fixed_reg_p (0) || fixed_regs[0])
error ("r0 needs to be available as a call-clobbered register");
scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
{
- if (call_used_regs[1] && ! fixed_regs[1])
+ if (call_used_or_fixed_reg_p (1) && ! fixed_regs[1])
scratch1 = gen_rtx_REG (ptr_mode, 1);
/* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
pointing where to return struct values. */
- if (call_used_regs[3] && ! fixed_regs[3])
+ if (call_used_or_fixed_reg_p (3) && ! fixed_regs[3])
scratch2 = gen_rtx_REG (Pmode, 3);
}
emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
offset_addr = scratch0;
}
- else if (scratch0 != scratch1)
- {
- emit_move_insn (scratch1, GEN_INT (vcall_offset));
- emit_insn (gen_add2_insn (scratch0, scratch1));
- offset_addr = scratch0;
- }
else
gcc_unreachable (); /* FIXME */
emit_load_ptr (scratch0, offset_addr);
emit_barrier ();
/* Run just enough of rest_of_compilation to do scheduling and get
- the insns emitted. Note that use_thunk calls
- assemble_start_function and assemble_end_function. */
+ the insns emitted. */
insns = get_insns ();
sh_reorg ();
shorten_branches (insns);
+ assemble_start_function (thunk_fndecl, fnname);
final_start_function (insns, file, 1);
final (insns, file, 1);
final_end_function ();
+ assemble_end_function (thunk_fndecl, fnname);
reload_completed = 0;
epilogue_completed = 0;
{
for (int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno ++)
if (! VALID_REGISTER_P (regno))
- fixed_regs[regno] = call_used_regs[regno] = 1;
+ fixed_regs[regno] = 1;
/* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs. */
if (flag_pic)
- {
- fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
- call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
- }
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
if (TARGET_FDPIC)
{
fixed_regs[PIC_REG] = 1;
call_used_regs[PIC_REG] = 1;
- call_really_used_regs[PIC_REG] = 1;
}
/* Renesas saves and restores mac registers on call. */
if (TARGET_HITACHI && ! TARGET_NOMACSAVE)
{
- call_really_used_regs[MACH_REG] = 0;
- call_really_used_regs[MACL_REG] = 0;
+ call_used_regs[MACH_REG] = 0;
+ call_used_regs[MACL_REG] = 0;
}
for (int regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++)
- if (! fixed_regs[regno] && call_really_used_regs[regno])
+ if (! fixed_regs[regno] && call_used_regs[regno])
SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);
- call_really_used_regs[FPSCR_MODES_REG] = 0;
- call_really_used_regs[FPSCR_STAT_REG] = 0;
+ call_used_regs[FPSCR_MODES_REG] = 0;
+ call_used_regs[FPSCR_STAT_REG] = 0;
}
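This hunk also reflects the removal of the separate call_really_used_regs array: call_used_regs now carries that meaning directly, and the places that want the old "call-used or fixed" test (as in the thunk code above) use call_used_or_fixed_reg_p. A hypothetical helper showing the mapping:

/* Hypothetical illustration of the renamed predicates:
     call_used_regs[r]             -- was call_really_used_regs[r]
     call_used_or_fixed_reg_p (r)  -- was the old call_used_regs[r],
				      which also counted fixed registers.  */
static bool
example_usable_scratch_reg_p (unsigned int regno)
{
  return call_used_regs[regno] && ! fixed_regs[regno];
}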
/* Implement TARGET_LEGITIMATE_CONSTANT_P
{
if (CALL_P (DF_REF_INSN (d)))
{
- if (REGNO_REG_SET_P (regs_invalidated_by_call_regset, GBR_REG))
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, GBR_REG))
return NULL_RTX;
else
continue;
rtx r = gen_reg_rtx (SImode);
rtx_insn* i0;
if (from_mode == QImode)
- i0 = emit_insn_after (gen_extendqisi2 (r, set_src), insn);
+ i0 = sh_check_add_incdec_notes (
+ emit_insn_after (gen_extendqisi2 (r, set_src), insn));
else if (from_mode == HImode)
- i0 = emit_insn_after (gen_extendhisi2 (r, set_src), insn);
+ i0 = sh_check_add_incdec_notes (
+ emit_insn_after (gen_extendhisi2 (r, set_src), insn));
else
gcc_unreachable ();
sh_emit_mode_set (int entity ATTRIBUTE_UNUSED, int mode,
int prev_mode, HARD_REG_SET regs_live ATTRIBUTE_UNUSED)
{
- if ((TARGET_SH4A_FP || TARGET_SH4_300)
+ if ((TARGET_SH4A_FP || TARGET_FPU_SH4_300)
&& prev_mode != FP_MODE_NONE && prev_mode != mode)
{
emit_insn (gen_toggle_pr ());