+2008-08-06 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * config/alpha/alpha.c (alpha_preferred_reload_class,
+ alpha_secondary_reload, alpha_emit_set_const_1, function_value,
+ alpha_output_mi_thunk_osf): Avoid C++ keywords.
+ * config/arm/arm.c (output_move_vfp, output_move_neon): Likewise.
+ * config/arm/arm.md: Likewise.
+ * config/avr/avr-protos.h (preferred_reload_class,
+ test_hard_reg_class, avr_simplify_comparison_p,
+ out_shift_with_cnt, class_max_nregs): Likewise.
+ * config/avr/avr.c (class_max_nregs, avr_simplify_comparison_p,
+ output_movqi, output_movhi, output_movsisf, out_shift_with_cnt,
+ preferred_reload_class, test_hard_reg_class): Likewise.
+ * config/bfin/bfin.c (legitimize_pic_address, hard_regno_mode_ok,
+ bfin_memory_move_cost, bfin_secondary_reload,
+ bfin_output_mi_thunk): Likewise.
+ * config/crx/crx.c (crx_secondary_reload_class,
+ crx_memory_move_cost): Likewise.
+ * config/frv/frv-protos.h (frv_secondary_reload_class,
+ frv_class_likely_spilled_p, frv_class_max_nregs): Likewise.
+ * config/frv/frv.c (frv_override_options, frv_alloc_temp_reg,
+ frv_secondary_reload_class, frv_class_likely_spilled_p,
+ frv_class_max_nregs): Likewise.
+ * config/h8300/h8300.c (h8300_classify_operand,
+ h8300_unary_length, h8300_bitfield_length, h8300_asm_insn_count):
+ Likewise.
+ * config/i386/winnt.c (i386_pe_declare_function_type): Likewise.
+ * config/ia64/ia64.c (ia64_preferred_reload_class,
+ ia64_secondary_reload_class, ia64_output_mi_thunk): Likewise.
+ * config/iq2000/iq2000.c (gen_int_relational): Likewise.
+ * config/m32c/m32c.c (class_can_hold_mode, m32c_output_compare):
+ Likewise.
+ * config/m68hc11/m68hc11.c (preferred_reload_class,
+ m68hc11_memory_move_cost): Likewise.
+ * config/mcore/mcore.c (mcore_secondary_reload_class,
+ mcore_reload_class): Likewise.
+ * config/mips/mips.c (mips_hard_regno_mode_ok_p,
+ mips_class_max_nregs, mips_cannot_change_mode_class,
+ mips_preferred_reload_class, mips_secondary_reload_class,
+ mips_output_mi_thunk): Likewise.
+ * config/mmix/mmix.c (mmix_preferred_reload_class,
+ mmix_preferred_output_reload_class, mmix_secondary_reload_class):
+ Likewise.
+ * config/mn10300/mn10300.c (mn10300_secondary_reload_class):
+ Likewise.
+ * config/pa/pa.c (pa_secondary_reload, pa_combine_instructions,
+ pa_can_combine_p, pa_cannot_change_mode_class): Likewise.
+ * config/pa/pa.h (LEGITIMIZE_RELOAD_ADDRESS): Likewise.
+ * config/rs6000/rs6000.c (paired_expand_vector_init,
+ rs6000_secondary_reload_class, rs6000_output_mi_thunk,
+ compare_section_name, rs6000_memory_move_cost): Likewise.
+ * config/s390/s390.c (s390_emit_compare_and_swap,
+ s390_preferred_reload_class, s390_secondary_reload,
+ legitimize_pic_address, legitimize_tls_address,
+ legitimize_reload_address, s390_expand_cs_hqi, s390_expand_atomic,
+ s390_class_max_nregs): Likewise.
+ * config/s390/s390.h (LEGITIMIZE_RELOAD_ADDRESS): Likewise.
+ * config/s390/s390.md: Likewise.
+ * config/score/score-protos.h (score_secondary_reload_class,
+ score_preferred_reload_class): Likewise.
+ * config/score/score.c (score_preferred_reload_class,
+ score_secondary_reload_class): Likewise.
+ * config/score/score3.c (score3_output_mi_thunk,
+ score3_preferred_reload_class, score3_secondary_reload_class,
+ score3_hard_regno_mode_ok): Likewise.
+ * config/score/score3.h (score3_preferred_reload_class,
+ score3_secondary_reload_class): Likewise.
+ * config/score/score7.c (score7_output_mi_thunk,
+ score7_preferred_reload_class, score7_secondary_reload_class,
+ score7_hard_regno_mode_ok): Likewise.
+ * config/score/score7.h (score7_preferred_reload_class,
+ score7_secondary_reload_class): Likewise.
+ * config/sh/sh.c (prepare_move_operands, output_far_jump,
+ output_branchy_insn, add_constant, gen_block_redirect,
+ sh_insn_length_adjustment, sh_cannot_change_mode_class,
+ sh_output_mi_thunk, replace_n_hard_rtx, sh_secondary_reload):
+ Likewise.
+ * config/sparc/sparc.c (sparc_output_mi_thunk): Likewise.
+ * config/stormy16/stormy16.c (xstormy16_output_cbranch_hi,
+ xstormy16_output_cbranch_si, xstormy16_secondary_reload_class,
+ xstormy16_preferred_reload_class): Likewise.
+ * config/xtensa/xtensa.c (xtensa_expand_compare_and_swap,
+ xtensa_expand_atomic, override_options,
+ xtensa_preferred_reload_class, xtensa_secondary_reload_class):
+ Likewise.
+ * reorg.c (try_merge_delay_insns): Likewise.
+ * tree.c (merge_dllimport_decl_attributes): Likewise.
+
+ * config/frv/frv.c (frv_print_operand): Change isalpha to ISALPHA.
+
2008-08-06 Michael Matz <matz@suse.de>
* Makefile.in (write_entries_to_file): Quote words.
/* On the Alpha, all (non-symbolic) constants except zero go into
a floating-point register via memory. Note that we cannot
- return anything that is not a subset of CLASS, and that some
+ return anything that is not a subset of RCLASS, and that some
symbolic constants cannot be dropped to memory. */
enum reg_class
-alpha_preferred_reload_class(rtx x, enum reg_class class)
+alpha_preferred_reload_class(rtx x, enum reg_class rclass)
{
/* Zero is present in any register class. */
if (x == CONST0_RTX (GET_MODE (x)))
- return class;
+ return rclass;
/* These sorts of constants we can easily drop to memory. */
if (GET_CODE (x) == CONST_INT
|| GET_CODE (x) == CONST_DOUBLE
|| GET_CODE (x) == CONST_VECTOR)
{
- if (class == FLOAT_REGS)
+ if (rclass == FLOAT_REGS)
return NO_REGS;
- if (class == ALL_REGS)
+ if (rclass == ALL_REGS)
return GENERAL_REGS;
- return class;
+ return rclass;
}
/* All other kinds of constants should not (and in the case of HIGH
cannot) be dropped to memory -- instead we use a GENERAL_REGS
secondary reload. */
if (CONSTANT_P (x))
- return (class == ALL_REGS ? GENERAL_REGS : class);
+ return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
- return class;
+ return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
- CLASS requires an extra scratch or immediate register. Return the class
+ RCLASS requires an extra scratch or immediate register. Return the class
needed for the immediate register. */
static enum reg_class
-alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
+alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
enum machine_mode mode, secondary_reload_info *sri)
{
/* Loading and storing HImode or QImode values to and from memory
/* We also cannot do integral arithmetic into FP regs, as might result
from register elimination into a DImode fp register. */
- if (class == FLOAT_REGS)
+ if (rclass == FLOAT_REGS)
{
if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
return GENERAL_REGS;
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
HOST_WIDE_INT c, int n, bool no_output)
{
- HOST_WIDE_INT new;
+ HOST_WIDE_INT new_const;
int i, bits;
/* Use a pseudo if highly optimizing and still generating RTL. */
rtx subtarget
/* First, see if minus some low bits, we've an easy load of
high bits. */
- new = ((c & 0xffff) ^ 0x8000) - 0x8000;
- if (new != 0)
+ new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
+ if (new_const != 0)
{
- temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
+ temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
if (temp)
{
if (no_output)
return temp;
- return expand_binop (mode, add_optab, temp, GEN_INT (new),
+ return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
target, 0, OPTAB_WIDEN);
}
}
if (bits > 0)
for (; bits > 0; bits--)
{
- new = c >> bits;
- temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ new_const = c >> bits;
+ temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
if (!temp && c < 0)
{
- new = (unsigned HOST_WIDE_INT)c >> bits;
- temp = alpha_emit_set_const (subtarget, mode, new,
+ new_const = (unsigned HOST_WIDE_INT)c >> bits;
+ temp = alpha_emit_set_const (subtarget, mode, new_const,
i, no_output);
}
if (temp)
if (bits > 0)
for (; bits > 0; bits--)
{
- new = c << bits;
- temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ new_const = c << bits;
+ temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
if (!temp)
{
- new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
- temp = alpha_emit_set_const (subtarget, mode, new,
+ new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
+ temp = alpha_emit_set_const (subtarget, mode, new_const,
i, no_output);
}
if (temp)
if (bits > 0)
for (; bits > 0; bits--)
{
- new = c << bits;
- temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ new_const = c << bits;
+ temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
if (!temp)
{
- new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
- temp = alpha_emit_set_const (subtarget, mode, new,
+ new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
+ temp = alpha_emit_set_const (subtarget, mode, new_const,
i, no_output);
}
if (temp)
constant except that all bytes that are 0 are changed to be 0xff. If we
can, then we can do a ZAPNOT to obtain the desired constant. */
- new = c;
+ new_const = c;
for (i = 0; i < 64; i += 8)
- if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
- new |= (HOST_WIDE_INT) 0xff << i;
+ if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
+ new_const |= (HOST_WIDE_INT) 0xff << i;
/* We are only called for SImode and DImode. If this is SImode, ensure that
we are sign extended to a full word. */
if (mode == SImode)
- new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
- if (new != c)
+ if (new_const != c)
{
- temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
+ temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
if (temp)
{
if (no_output)
return temp;
- return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
+ return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
target, 0, OPTAB_WIDEN);
}
}
enum machine_mode mode)
{
unsigned int regnum, dummy;
- enum mode_class class;
+ enum mode_class mclass;
gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
if (valtype)
mode = TYPE_MODE (valtype);
- class = GET_MODE_CLASS (mode);
- switch (class)
+ mclass = GET_MODE_CLASS (mode);
+ switch (mclass)
{
case MODE_INT:
PROMOTE_MODE (mode, dummy, valtype);
tree function)
{
HOST_WIDE_INT hi, lo;
- rtx this, insn, funexp;
+ rtx this_rtx, insn, funexp;
/* We always require a valid GP. */
emit_insn (gen_prologue_ldgp ());
/* Find the "this" pointer. If the function returns a structure,
the structure return pointer is in $16. */
if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, 17);
+ this_rtx = gen_rtx_REG (Pmode, 17);
else
- this = gen_rtx_REG (Pmode, 16);
+ this_rtx = gen_rtx_REG (Pmode, 16);
/* Add DELTA. When possible we use ldah+lda. Otherwise load the
entire constant for the add. */
if (hi + lo == delta)
{
if (hi)
- emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
if (lo)
- emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
}
else
{
rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
delta, -(delta < 0));
- emit_insn (gen_adddi3 (this, this, tmp));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
}
/* Add a delta stored in the vtable at VCALL_OFFSET. */
rtx tmp, tmp2;
tmp = gen_rtx_REG (Pmode, 0);
- emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
tmp2 = tmp;
emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
- emit_insn (gen_adddi3 (this, this, tmp));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
}
/* Generate a tail call to the target function. */
int load = REG_P (operands[0]);
int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
int integer_p = GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT;
- const char *template;
+ const char *templ;
char buff[50];
enum machine_mode mode;
switch (GET_CODE (addr))
{
case PRE_DEC:
- template = "f%smdb%c%%?\t%%0!, {%%%s1}%s";
+ templ = "f%smdb%c%%?\t%%0!, {%%%s1}%s";
ops[0] = XEXP (addr, 0);
ops[1] = reg;
break;
case POST_INC:
- template = "f%smia%c%%?\t%%0!, {%%%s1}%s";
+ templ = "f%smia%c%%?\t%%0!, {%%%s1}%s";
ops[0] = XEXP (addr, 0);
ops[1] = reg;
break;
default:
- template = "f%s%c%%?\t%%%s0, %%1%s";
+ templ = "f%s%c%%?\t%%%s0, %%1%s";
ops[0] = reg;
ops[1] = mem;
break;
}
- sprintf (buff, template,
+ sprintf (buff, templ,
load ? "ld" : "st",
dp ? 'd' : 's',
dp ? "P" : "",
{
rtx reg, mem, addr, ops[2];
int regno, load = REG_P (operands[0]);
- const char *template;
+ const char *templ;
char buff[50];
enum machine_mode mode;
switch (GET_CODE (addr))
{
case POST_INC:
- template = "v%smia%%?\t%%0!, %%h1";
+ templ = "v%smia%%?\t%%0!, %%h1";
ops[0] = XEXP (addr, 0);
ops[1] = reg;
break;
}
default:
- template = "v%smia%%?\t%%m0, %%h1";
+ templ = "v%smia%%?\t%%m0, %%h1";
ops[0] = mem;
ops[1] = reg;
}
- sprintf (buff, template, load ? "ld" : "st");
+ sprintf (buff, templ, load ? "ld" : "st");
output_asm_insn (buff, ops);
return "";
&& GET_CODE (base = XEXP (base, 0)) == REG))
&& REGNO_POINTER_ALIGN (REGNO (base)) >= 32)
{
- rtx new;
+ rtx new_rtx;
- new = widen_memory_access (operands[1], SImode,
- ((INTVAL (offset) & ~3)
- - INTVAL (offset)));
- emit_insn (gen_movsi (reg, new));
+ new_rtx = widen_memory_access (operands[1], SImode,
+ ((INTVAL (offset) & ~3)
+ - INTVAL (offset)));
+ emit_insn (gen_movsi (reg, new_rtx));
if (((INTVAL (offset) & 2) != 0)
^ (BYTES_BIG_ENDIAN ? 1 : 0))
{
extern void avr_output_addr_vec_elt (FILE *stream, int value);
extern const char *avr_out_sbxx_branch (rtx insn, rtx operands[]);
-extern enum reg_class preferred_reload_class (rtx x, enum reg_class class);
+extern enum reg_class preferred_reload_class (rtx x, enum reg_class rclass);
extern int extra_constraint_Q (rtx x);
extern rtx legitimize_address (rtx x, rtx oldx, enum machine_mode mode);
extern int adjust_insn_length (rtx insn, int len);
extern int _reg_unused_after (rtx insn, rtx reg);
extern int avr_jump_mode (rtx x, rtx insn);
extern int byte_immediate_operand (rtx op, enum machine_mode mode);
-extern int test_hard_reg_class (enum reg_class class, rtx x);
+extern int test_hard_reg_class (enum reg_class rclass, rtx x);
extern int jump_over_one_insn_p (rtx insn, rtx dest);
extern int avr_hard_regno_mode_ok (int regno, enum machine_mode mode);
extern void final_prescan_insn (rtx insn, rtx *operand, int num_operands);
extern int avr_simplify_comparison_p (enum machine_mode mode,
- RTX_CODE operator, rtx x);
+ RTX_CODE op, rtx x);
extern RTX_CODE avr_normalize_condition (RTX_CODE condition);
extern int compare_eq_p (rtx insn);
-extern void out_shift_with_cnt (const char *template, rtx insn,
+extern void out_shift_with_cnt (const char *templ, rtx insn,
rtx operands[], int *len, int t_len);
#endif /* RTX_CODE */
#ifdef HAVE_MACHINE_MODES
-extern int class_max_nregs (enum reg_class class, enum machine_mode mode);
+extern int class_max_nregs (enum reg_class rclass, enum machine_mode mode);
#endif /* HAVE_MACHINE_MODES */
#ifdef REAL_VALUE_TYPE
class CLASS needed to hold a value of mode MODE. */
int
-class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,enum machine_mode mode)
+class_max_nregs (enum reg_class rclass ATTRIBUTE_UNUSED,enum machine_mode mode)
{
return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
}
/* Return 0 if undefined, 1 if always true or always false. */
int
-avr_simplify_comparison_p (enum machine_mode mode, RTX_CODE operator, rtx x)
+avr_simplify_comparison_p (enum machine_mode mode, RTX_CODE op, rtx x)
{
unsigned int max = (mode == QImode ? 0xff :
mode == HImode ? 0xffff :
mode == SImode ? 0xffffffff : 0);
- if (max && operator && GET_CODE (x) == CONST_INT)
+ if (max && op && GET_CODE (x) == CONST_INT)
{
- if (unsigned_condition (operator) != operator)
+ if (unsigned_condition (op) != op)
max >>= 1;
if (max != (INTVAL (x) & max)
}
else if (GET_CODE (dest) == MEM)
{
- const char *template;
+ const char *templ;
if (src == const0_rtx)
operands[1] = zero_reg_rtx;
- template = out_movqi_mr_r (insn, operands, real_l);
+ templ = out_movqi_mr_r (insn, operands, real_l);
if (!real_l)
- output_asm_insn (template, operands);
+ output_asm_insn (templ, operands);
operands[1] = src;
}
}
else if (GET_CODE (dest) == MEM)
{
- const char *template;
+ const char *templ;
if (src == const0_rtx)
operands[1] = zero_reg_rtx;
- template = out_movhi_mr_r (insn, operands, real_l);
+ templ = out_movhi_mr_r (insn, operands, real_l);
if (!real_l)
- output_asm_insn (template, operands);
+ output_asm_insn (templ, operands);
operands[1] = src;
return "";
}
else if (GET_CODE (dest) == MEM)
{
- const char *template;
+ const char *templ;
if (src == const0_rtx)
operands[1] = zero_reg_rtx;
- template = out_movsi_mr_r (insn, operands, real_l);
+ templ = out_movsi_mr_r (insn, operands, real_l);
if (!real_l)
- output_asm_insn (template, operands);
+ output_asm_insn (templ, operands);
operands[1] = src;
return "";
carefully hand-optimized in ?sh??i3_out. */
void
-out_shift_with_cnt (const char *template, rtx insn, rtx operands[],
+out_shift_with_cnt (const char *templ, rtx insn, rtx operands[],
int *len, int t_len)
{
rtx op[10];
else
{
while (count-- > 0)
- output_asm_insn (template, op);
+ output_asm_insn (templ, op);
}
return;
else
{
strcat (str, "\n1:\t");
- strcat (str, template);
+ strcat (str, templ);
strcat (str, second_label ? "\n2:\t" : "\n\t");
strcat (str, use_zero_reg ? AS1 (lsr,%3) : AS1 (dec,%3));
strcat (str, CR_TAB);
in class CLASS. */
enum reg_class
-preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class class)
+preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class rclass)
{
- return class;
+ return rclass;
}
int
-test_hard_reg_class (enum reg_class class, rtx x)
+test_hard_reg_class (enum reg_class rclass, rtx x)
{
int regno = true_regnum (x);
if (regno < 0)
return 0;
- if (TEST_HARD_REG_CLASS (class, regno))
+ if (TEST_HARD_REG_CLASS (rclass, regno))
return 1;
return 0;
legitimize_pic_address (rtx orig, rtx reg, rtx picreg)
{
rtx addr = orig;
- rtx new = orig;
+ rtx new_rtx = orig;
if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
{
}
tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), unspec);
- new = gen_const_mem (Pmode, gen_rtx_PLUS (Pmode, picreg, tmp));
+ new_rtx = gen_const_mem (Pmode, gen_rtx_PLUS (Pmode, picreg, tmp));
- emit_move_insn (reg, new);
+ emit_move_insn (reg, new_rtx);
if (picreg == pic_offset_table_rtx)
crtl->uses_pic_offset_table = 1;
return reg;
return gen_rtx_PLUS (Pmode, base, addr);
}
- return new;
+ return new_rtx;
}
\f
/* Stack frame layout. */
hard_regno_mode_ok (int regno, enum machine_mode mode)
{
/* Allow only dregs to store value of mode HI or QI */
- enum reg_class class = REGNO_REG_CLASS (regno);
+ enum reg_class rclass = REGNO_REG_CLASS (regno);
if (mode == CCmode)
return 0;
if (mode == V2HImode)
return D_REGNO_P (regno);
- if (class == CCREGS)
+ if (rclass == CCREGS)
return mode == BImode;
if (mode == PDImode || mode == V2PDImode)
return regno == REG_A0 || regno == REG_A1;
int
bfin_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
- enum reg_class class,
+ enum reg_class rclass,
int in ATTRIBUTE_UNUSED)
{
/* Make memory accesses slightly more expensive than any register-register
move. Also, penalize non-DP registers, since they need secondary
reloads to load and store. */
- if (! reg_class_subset_p (class, DPREGS))
+ if (! reg_class_subset_p (rclass, DPREGS))
return 10;
return 8;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
- CLASS requires an extra scratch register. Return the class needed for the
+ RCLASS requires an extra scratch register. Return the class needed for the
scratch register. */
static enum reg_class
-bfin_secondary_reload (bool in_p, rtx x, enum reg_class class,
+bfin_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
enum machine_mode mode, secondary_reload_info *sri)
{
/* If we have HImode or QImode, we can only use DREGS as secondary registers;
rtx op2 = XEXP (x, 1);
int large_constant_p = ! satisfies_constraint_Ks7 (op2);
- if (class == PREGS || class == PREGS_CLOBBERED)
+ if (rclass == PREGS || rclass == PREGS_CLOBBERED)
return NO_REGS;
/* If destination is a DREG, we can do this without a scratch register
if the constant is valid for an add instruction. */
- if ((class == DREGS || class == DPREGS)
+ if ((rclass == DREGS || rclass == DPREGS)
&& ! large_constant_p)
return NO_REGS;
/* Reloading to anything other than a DREG? Use a PREG scratch
AREGS are an exception; they can only move to or from another register
in AREGS or one in DREGS. They can also be assigned the constant 0. */
if (x_class == AREGS || x_class == EVEN_AREGS || x_class == ODD_AREGS)
- return (class == DREGS || class == AREGS || class == EVEN_AREGS
- || class == ODD_AREGS
+ return (rclass == DREGS || rclass == AREGS || rclass == EVEN_AREGS
+ || rclass == ODD_AREGS
? NO_REGS : DREGS);
- if (class == AREGS || class == EVEN_AREGS || class == ODD_AREGS)
+ if (rclass == AREGS || rclass == EVEN_AREGS || rclass == ODD_AREGS)
{
if (code == MEM)
{
}
/* CCREGS can only be moved from/to DREGS. */
- if (class == CCREGS && x_class != DREGS)
+ if (rclass == CCREGS && x_class != DREGS)
return DREGS;
- if (x_class == CCREGS && class != DREGS)
+ if (x_class == CCREGS && rclass != DREGS)
return DREGS;
/* All registers other than AREGS can load arbitrary constants. The only
case that remains is MEM. */
if (code == MEM)
- if (! reg_class_subset_p (class, default_class))
+ if (! reg_class_subset_p (rclass, default_class))
return default_class;
return NO_REGS;
{
rtx xops[3];
/* The this parameter is passed as the first argument. */
- rtx this = gen_rtx_REG (Pmode, REG_R0);
+ rtx this_rtx = gen_rtx_REG (Pmode, REG_R0);
/* Adjust the this parameter by a fixed constant. */
if (delta)
{
- xops[1] = this;
+ xops[1] = this_rtx;
if (delta >= -64 && delta <= 63)
{
xops[0] = GEN_INT (delta);
output_asm_insn ("%h1 = %h0; %d1 = %d0; %2 = %2 + %1", xops);
xops[0] = gen_rtx_MEM (Pmode, p2tmp);
}
- xops[2] = this;
+ xops[2] = this_rtx;
output_asm_insn ("%1 = %0; %2 = %2 + %1;", xops);
}
/* Transfer between HILO_REGS and memory via secondary reloading. */
enum reg_class
-crx_secondary_reload_class (enum reg_class class,
+crx_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x ATTRIBUTE_UNUSED)
{
- if (reg_classes_intersect_p (class, HILO_REGS)
+ if (reg_classes_intersect_p (rclass, HILO_REGS)
&& true_regnum (x) == -1)
return GENERAL_REGS;
}
/* Return the cost of moving data of mode MODE between a register of class
- * CLASS and memory; IN is zero if the value is to be written to memory,
+ * RCLASS and memory; IN is zero if the value is to be written to memory,
* nonzero if it is to be read in. This cost is relative to those in
* REGISTER_MOVE_COST. */
int
crx_memory_move_cost (enum machine_mode mode,
- enum reg_class class ATTRIBUTE_UNUSED,
+ enum reg_class rclass ATTRIBUTE_UNUSED,
int in ATTRIBUTE_UNUSED)
{
/* One LD or ST takes twice the time of a simple reg-reg move */
- if (reg_classes_intersect_p (class, GENERAL_REGS))
+ if (reg_classes_intersect_p (rclass, GENERAL_REGS))
{
/* printf ("GENERAL_REGS LD/ST = %d\n", 4 * HARD_REGNO_NREGS (0, mode));*/
return 4 * HARD_REGNO_NREGS (0, mode);
}
- else if (reg_classes_intersect_p (class, HILO_REGS))
+ else if (reg_classes_intersect_p (rclass, HILO_REGS))
{
/* HILO to memory and vice versa */
/* printf ("HILO_REGS %s = %d\n", in ? "LD" : "ST",
extern int frv_trampoline_size (void);
extern void frv_initialize_trampoline (rtx, rtx, rtx);
extern enum reg_class frv_secondary_reload_class
- (enum reg_class class,
+ (enum reg_class rclass,
enum machine_mode mode,
rtx x, int);
-extern int frv_class_likely_spilled_p (enum reg_class class);
+extern int frv_class_likely_spilled_p (enum reg_class rclass);
extern int frv_hard_regno_mode_ok (int, enum machine_mode);
extern int frv_hard_regno_nregs (int, enum machine_mode);
-extern int frv_class_max_nregs (enum reg_class class,
+extern int frv_class_max_nregs (enum reg_class rclass,
enum machine_mode mode);
extern int frv_legitimate_constant_p (rtx);
extern enum machine_mode frv_select_cc_mode (enum rtx_code, rtx, rtx);
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
{
- enum reg_class class;
+ enum reg_class rclass;
if (GPR_P (regno))
{
int gpr_reg = regno - GPR_FIRST;
if (gpr_reg == GR8_REG)
- class = GR8_REGS;
+ rclass = GR8_REGS;
else if (gpr_reg == GR9_REG)
- class = GR9_REGS;
+ rclass = GR9_REGS;
else if (gpr_reg == GR14_REG)
- class = FDPIC_FPTR_REGS;
+ rclass = FDPIC_FPTR_REGS;
else if (gpr_reg == FDPIC_REGNO)
- class = FDPIC_REGS;
+ rclass = FDPIC_REGS;
else if ((gpr_reg & 3) == 0)
- class = QUAD_REGS;
+ rclass = QUAD_REGS;
else if ((gpr_reg & 1) == 0)
- class = EVEN_REGS;
+ rclass = EVEN_REGS;
else
- class = GPR_REGS;
+ rclass = GPR_REGS;
}
else if (FPR_P (regno))
{
int fpr_reg = regno - GPR_FIRST;
if ((fpr_reg & 3) == 0)
- class = QUAD_FPR_REGS;
+ rclass = QUAD_FPR_REGS;
else if ((fpr_reg & 1) == 0)
- class = FEVEN_REGS;
+ rclass = FEVEN_REGS;
else
- class = FPR_REGS;
+ rclass = FPR_REGS;
}
else if (regno == LR_REGNO)
- class = LR_REG;
+ rclass = LR_REG;
else if (regno == LCR_REGNO)
- class = LCR_REG;
+ rclass = LCR_REG;
else if (ICC_P (regno))
- class = ICC_REGS;
+ rclass = ICC_REGS;
else if (FCC_P (regno))
- class = FCC_REGS;
+ rclass = FCC_REGS;
else if (ICR_P (regno))
- class = ICR_REGS;
+ rclass = ICR_REGS;
else if (FCR_P (regno))
- class = FCR_REGS;
+ rclass = FCR_REGS;
else if (ACC_P (regno))
{
int r = regno - ACC_FIRST;
if ((r & 3) == 0)
- class = QUAD_ACC_REGS;
+ rclass = QUAD_ACC_REGS;
else if ((r & 1) == 0)
- class = EVEN_ACC_REGS;
+ rclass = EVEN_ACC_REGS;
else
- class = ACC_REGS;
+ rclass = ACC_REGS;
}
else if (ACCG_P (regno))
- class = ACCG_REGS;
+ rclass = ACCG_REGS;
else
- class = NO_REGS;
+ rclass = NO_REGS;
- regno_reg_class[regno] = class;
+ regno_reg_class[regno] = rclass;
}
/* Check for small data option */
static rtx
frv_alloc_temp_reg (
frv_tmp_reg_t *info, /* which registers are available */
- enum reg_class class, /* register class desired */
+ enum reg_class rclass, /* register class desired */
enum machine_mode mode, /* mode to allocate register with */
int mark_as_used, /* register not available after allocation */
int no_abort) /* return NULL instead of aborting */
{
- int regno = info->next_reg[ (int)class ];
+ int regno = info->next_reg[ (int)rclass ];
int orig_regno = regno;
- HARD_REG_SET *reg_in_class = &reg_class_contents[ (int)class ];
+ HARD_REG_SET *reg_in_class = &reg_class_contents[ (int)rclass ];
int i, nr;
for (;;)
}
nr = HARD_REGNO_NREGS (regno, mode);
- info->next_reg[ (int)class ] = regno + nr;
+ info->next_reg[ (int)rclass ] = regno + nr;
if (mark_as_used)
for (i = 0; i < nr; i++)
HOST_WIDE_INT value;
int offset;
- if (code != 0 && !isalpha (code))
+ if (code != 0 && !ISALPHA (code))
value = 0;
else if (GET_CODE (x) == CONST_INT)
You should define these macros to indicate to the reload phase that it may
need to allocate at least one register for a reload in addition to the
register to contain the data. Specifically, if copying X to a register
- CLASS in MODE requires an intermediate register, you should define
+ RCLASS in MODE requires an intermediate register, you should define
`SECONDARY_INPUT_RELOAD_CLASS' to return the largest register class all of
whose registers can be used as intermediate registers or scratch registers.
- If copying a register CLASS in MODE to X requires an intermediate or scratch
+ If copying a register RCLASS in MODE to X requires an intermediate or scratch
register, `SECONDARY_OUTPUT_RELOAD_CLASS' should be defined to return the
largest register class required. If the requirements for input and output
reloads are the same, the macro `SECONDARY_RELOAD_CLASS' should be used
The values returned by these macros are often `GENERAL_REGS'. Return
`NO_REGS' if no spare register is needed; i.e., if X can be directly copied
- to or from a register of CLASS in MODE without requiring a scratch register.
+ to or from a register of RCLASS in MODE without requiring a scratch register.
Do not define this macro if it would always return `NO_REGS'.
If a scratch register is required (either with or without an intermediate
Define constraints for the reload register and scratch register that contain
a single register class. If the original reload register (whose class is
- CLASS) can meet the constraint given in the pattern, the value returned by
+ RCLASS) can meet the constraint given in the pattern, the value returned by
these macros is used for the class of the scratch register. Otherwise, two
additional reload registers are required. Their classes are obtained from
the constraints in the insn pattern.
This case often occurs between floating-point and general registers. */
enum reg_class
-frv_secondary_reload_class (enum reg_class class,
+frv_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x,
int in_p ATTRIBUTE_UNUSED)
{
enum reg_class ret;
- switch (class)
+ switch (rclass)
{
default:
ret = NO_REGS;
\f
/* A C expression whose value is nonzero if pseudos that have been assigned to
- registers of class CLASS would likely be spilled because registers of CLASS
+ registers of class RCLASS would likely be spilled because registers of RCLASS
are needed for spill registers.
- The default value of this macro returns 1 if CLASS has exactly one register
+ The default value of this macro returns 1 if RCLASS has exactly one register
and zero otherwise. On most machines, this default should be used. Only
define this macro to some other expression if pseudo allocated by
`local-alloc.c' end up in memory because their hard registers were needed
register allocation. */
int
-frv_class_likely_spilled_p (enum reg_class class)
+frv_class_likely_spilled_p (enum reg_class rclass)
{
- switch (class)
+ switch (rclass)
{
default:
break;
\f
/* A C expression for the maximum number of consecutive registers of
- class CLASS needed to hold a value of mode MODE.
+ class RCLASS needed to hold a value of mode MODE.
This is closely related to the macro `HARD_REGNO_NREGS'. In fact, the value
- of the macro `CLASS_MAX_NREGS (CLASS, MODE)' should be the maximum value of
- `HARD_REGNO_NREGS (REGNO, MODE)' for all REGNO values in the class CLASS.
+ of the macro `CLASS_MAX_NREGS (RCLASS, MODE)' should be the maximum value of
+ `HARD_REGNO_NREGS (REGNO, MODE)' for all REGNO values in the class RCLASS.
This macro helps control the handling of multiple-word values in
the reload pass.
This declaration is required. */
int
-frv_class_max_nregs (enum reg_class class, enum machine_mode mode)
+frv_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
- if (class == ACCG_REGS)
+ if (rclass == ACCG_REGS)
/* An N-byte value requires N accumulator guards. */
return GET_MODE_SIZE (mode);
else
return h8300_constant_length (offset);
}
-/* Store the class of operand OP in *CLASS and return the length of any
- extra operand fields. SIZE is the number of bytes in OP. CLASS
+/* Store the class of operand OP in *OPCLASS and return the length of any
+ extra operand fields. SIZE is the number of bytes in OP. OPCLASS
can be null if only the length is needed. */
static unsigned int
-h8300_classify_operand (rtx op, int size, enum h8300_operand_class *class)
+h8300_classify_operand (rtx op, int size, enum h8300_operand_class *opclass)
{
enum h8300_operand_class dummy;
- if (class == 0)
- class = &dummy;
+ if (opclass == 0)
+ opclass = &dummy;
if (CONSTANT_P (op))
{
- *class = H8OP_IMMEDIATE;
+ *opclass = H8OP_IMMEDIATE;
/* Byte-sized immediates are stored in the opcode fields. */
if (size == 1)
op = XEXP (op, 0);
if (CONSTANT_P (op))
{
- *class = H8OP_MEM_ABSOLUTE;
+ *opclass = H8OP_MEM_ABSOLUTE;
return h8300_constant_length (op);
}
else if (GET_CODE (op) == PLUS && CONSTANT_P (XEXP (op, 1)))
{
- *class = H8OP_MEM_COMPLEX;
+ *opclass = H8OP_MEM_COMPLEX;
return h8300_displacement_length (op, size);
}
else if (GET_RTX_CLASS (GET_CODE (op)) == RTX_AUTOINC)
{
- *class = H8OP_MEM_COMPLEX;
+ *opclass = H8OP_MEM_COMPLEX;
return 0;
}
else if (register_operand (op, VOIDmode))
{
- *class = H8OP_MEM_BASE;
+ *opclass = H8OP_MEM_BASE;
return 0;
}
}
gcc_assert (register_operand (op, VOIDmode));
- *class = H8OP_REGISTER;
+ *opclass = H8OP_REGISTER;
return 0;
}
unsigned int
h8300_unary_length (rtx op)
{
- enum h8300_operand_class class;
+ enum h8300_operand_class opclass;
unsigned int size, operand_length;
size = GET_MODE_SIZE (GET_MODE (op));
- operand_length = h8300_classify_operand (op, size, &class);
- switch (class)
+ operand_length = h8300_classify_operand (op, size, &opclass);
+ switch (opclass)
{
case H8OP_REGISTER:
return 2;
static unsigned int
h8300_short_immediate_length (rtx op)
{
- enum h8300_operand_class class;
+ enum h8300_operand_class opclass;
unsigned int size, operand_length;
size = GET_MODE_SIZE (GET_MODE (op));
- operand_length = h8300_classify_operand (op, size, &class);
+ operand_length = h8300_classify_operand (op, size, &opclass);
- switch (class)
+ switch (opclass)
{
case H8OP_REGISTER:
return 2;
static unsigned int
h8300_bitfield_length (rtx op, rtx op2)
{
- enum h8300_operand_class class;
+ enum h8300_operand_class opclass;
unsigned int size, operand_length;
if (GET_CODE (op) == REG)
gcc_assert (GET_CODE (op) != REG);
size = GET_MODE_SIZE (GET_MODE (op));
- operand_length = h8300_classify_operand (op, size, &class);
+ operand_length = h8300_classify_operand (op, size, &opclass);
- switch (class)
+ switch (opclass)
{
case H8OP_MEM_BASE:
case H8OP_MEM_ABSOLUTE:
}
}
-/* Count the number of assembly instructions in a string TEMPLATE. */
+/* Count the number of assembly instructions in a string TEMPL. */
static unsigned int
-h8300_asm_insn_count (const char *template)
+h8300_asm_insn_count (const char *templ)
{
unsigned int count = 1;
- for (; *template; template++)
- if (*template == '\n')
+ for (; *templ; templ++)
+ if (*templ == '\n')
count++;
return count;
/* Mark a function appropriately. This should only be called for
functions for which we are not emitting COFF debugging information.
FILE is the assembler output file, NAME is the name of the
- function, and PUBLIC is nonzero if the function is globally
+ function, and PUB is nonzero if the function is globally
visible. */
void
-i386_pe_declare_function_type (FILE *file, const char *name, int public)
+i386_pe_declare_function_type (FILE *file, const char *name, int pub)
{
fprintf (file, "\t.def\t");
assemble_name (file, name);
fprintf (file, ";\t.scl\t%d;\t.type\t%d;\t.endef\n",
- public ? (int) C_EXT : (int) C_STAT,
+ pub ? (int) C_EXT : (int) C_STAT,
(int) DT_FCN << N_BTSHFT);
}
return 2;
}
-/* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
+/* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on RCLASS
to use when copying X into that class. */
enum reg_class
-ia64_preferred_reload_class (rtx x, enum reg_class class)
+ia64_preferred_reload_class (rtx x, enum reg_class rclass)
{
- switch (class)
+ switch (rclass)
{
case FR_REGS:
case FP_REGS:
break;
}
- return class;
+ return rclass;
}
/* This function returns the register class required for a secondary
- register when copying between one of the registers in CLASS, and X,
+ register when copying between one of the registers in RCLASS, and X,
using MODE. A return value of NO_REGS means that no secondary register
is required. */
enum reg_class
-ia64_secondary_reload_class (enum reg_class class,
+ia64_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
int regno = -1;
if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
regno = true_regnum (x);
- switch (class)
+ switch (rclass)
{
case BR_REGS:
case AR_M_REGS:
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this, insn, funexp;
+ rtx this_rtx, insn, funexp;
unsigned int this_parmno;
unsigned int this_regno;
rtx delta_rtx;
if (!TARGET_REG_NAMES)
reg_names[this_regno] = ia64_reg_numbers[this_parmno];
- this = gen_rtx_REG (Pmode, this_regno);
+ this_rtx = gen_rtx_REG (Pmode, this_regno);
/* Apply the constant offset, if required. */
delta_rtx = GEN_INT (delta);
REG_POINTER (tmp) = 1;
if (delta && satisfies_constraint_I (delta_rtx))
{
- emit_insn (gen_ptr_extend_plus_imm (this, tmp, delta_rtx));
+ emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
delta = 0;
}
else
- emit_insn (gen_ptr_extend (this, tmp));
+ emit_insn (gen_ptr_extend (this_rtx, tmp));
}
if (delta)
{
emit_move_insn (tmp, delta_rtx);
delta_rtx = tmp;
}
- emit_insn (gen_adddi3 (this, this, delta_rtx));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
}
/* Apply the offset from the vtable, if required. */
{
rtx t = gen_rtx_REG (ptr_mode, 2);
REG_POINTER (t) = 1;
- emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
+ emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
if (satisfies_constraint_I (vcall_offset_rtx))
{
emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
emit_insn (gen_ptr_extend (tmp, t));
}
else
- emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
if (vcall_offset)
{
else
emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
- emit_insn (gen_adddi3 (this, this, tmp));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
}
/* Generate a tail call to the target function. */
{
if (p_info->const_add != 0)
{
- HOST_WIDE_INT new = INTVAL (cmp1) + p_info->const_add;
+ HOST_WIDE_INT new_const = INTVAL (cmp1) + p_info->const_add;
/* If modification of cmp1 caused overflow,
we would get the wrong answer if we follow the usual path;
thus, x > 0xffffffffU would turn into x > 0U. */
if ((p_info->unsignedp
- ? (unsigned HOST_WIDE_INT) new >
+ ? (unsigned HOST_WIDE_INT) new_const >
(unsigned HOST_WIDE_INT) INTVAL (cmp1)
- : new > INTVAL (cmp1))
+ : new_const > INTVAL (cmp1))
!= (p_info->const_add > 0))
{
/* This test is always true, but if INVERT is true then
return result;
}
else
- cmp1 = GEN_INT (new);
+ cmp1 = GEN_INT (new_const);
}
}
/* Used by m32c_register_move_cost to determine if a move is
impossibly expensive. */
static int
-class_can_hold_mode (int class, enum machine_mode mode)
+class_can_hold_mode (int rclass, enum machine_mode mode)
{
/* Cache the results: 0=untested 1=no 2=yes */
static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
- if (results[class][mode] == 0)
+ if (results[rclass][mode] == 0)
{
int r, n, i;
- results[class][mode] = 1;
+ results[rclass][mode] = 1;
for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
- if (class_contents[class][0] & (1 << r)
+ if (class_contents[rclass][0] & (1 << r)
&& HARD_REGNO_MODE_OK (r, mode))
{
int ok = 1;
n = HARD_REGNO_NREGS (r, mode);
for (i = 1; i < n; i++)
- if (!(class_contents[class][0] & (1 << (r + i))))
+ if (!(class_contents[rclass][0] & (1 << (r + i))))
ok = 0;
if (ok)
{
- results[class][mode] = 2;
+ results[rclass][mode] = 2;
break;
}
}
}
#if DEBUG0
fprintf (stderr, "class %s can hold %s? %s\n",
- class_names[class], mode_name[mode],
- (results[class][mode] == 2) ? "yes" : "no");
+ class_names[rclass], mode_name[mode],
+ (results[rclass][mode] == 2) ? "yes" : "no");
#endif
- return results[class][mode] == 2;
+ return results[rclass][mode] == 2;
}
/* Run-time Target Specification. */
char *
m32c_output_compare (rtx insn, rtx *operands)
{
- static char template[] = ";cmp.b\t%1,%0";
+ static char templ[] = ";cmp.b\t%1,%0";
/* ^ 5 */
- template[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
+ templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
if (m32c_compare_redundant (insn, operands))
{
#if DEBUG_CMP
fprintf(stderr, "cbranch: cmp not needed\n");
#endif
- return template;
+ return templ;
}
#if DEBUG_CMP
- fprintf(stderr, "cbranch: cmp needed: `%s'\n", template);
+ fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ);
#endif
- return template + 1;
+ return templ + 1;
}
#undef TARGET_ENCODE_SECTION_INFO
}
enum reg_class
-preferred_reload_class (rtx operand, enum reg_class class)
+preferred_reload_class (rtx operand, enum reg_class rclass)
{
enum machine_mode mode;
if (debug_m6811)
{
- printf ("Preferred reload: (class=%s): ", reg_class_names[class]);
+ printf ("Preferred reload: (class=%s): ", reg_class_names[rclass]);
}
- if (class == D_OR_A_OR_S_REGS && SP_REG_P (operand))
+ if (rclass == D_OR_A_OR_S_REGS && SP_REG_P (operand))
return m68hc11_base_reg_class;
- if (class >= S_REGS && (GET_CODE (operand) == MEM
+ if (rclass >= S_REGS && (GET_CODE (operand) == MEM
|| GET_CODE (operand) == CONST_INT))
{
/* S_REGS class must not be used. The movhi template does not
work to move a memory to a soft register.
Restrict to a hard reg. */
- switch (class)
+ switch (rclass)
{
default:
case G_REGS:
case D_OR_A_OR_S_REGS:
- class = A_OR_D_REGS;
+ rclass = A_OR_D_REGS;
break;
case A_OR_S_REGS:
- class = A_REGS;
+ rclass = A_REGS;
break;
case D_OR_SP_OR_S_REGS:
- class = D_OR_SP_REGS;
+ rclass = D_OR_SP_REGS;
break;
case D_OR_Y_OR_S_REGS:
- class = D_OR_Y_REGS;
+ rclass = D_OR_Y_REGS;
break;
case D_OR_X_OR_S_REGS:
- class = D_OR_X_REGS;
+ rclass = D_OR_X_REGS;
break;
case SP_OR_S_REGS:
- class = SP_REGS;
+ rclass = SP_REGS;
break;
case Y_OR_S_REGS:
- class = Y_REGS;
+ rclass = Y_REGS;
break;
case X_OR_S_REGS:
- class = X_REGS;
+ rclass = X_REGS;
break;
case D_OR_S_REGS:
- class = D_REGS;
+ rclass = D_REGS;
}
}
- else if (class == Y_REGS && GET_CODE (operand) == MEM)
+ else if (rclass == Y_REGS && GET_CODE (operand) == MEM)
{
- class = Y_REGS;
+ rclass = Y_REGS;
}
- else if (class == A_OR_D_REGS && GET_MODE_SIZE (mode) == 4)
+ else if (rclass == A_OR_D_REGS && GET_MODE_SIZE (mode) == 4)
{
- class = D_OR_X_REGS;
+ rclass = D_OR_X_REGS;
}
- else if (class >= S_REGS && S_REG_P (operand))
+ else if (rclass >= S_REGS && S_REG_P (operand))
{
- switch (class)
+ switch (rclass)
{
default:
case G_REGS:
case D_OR_A_OR_S_REGS:
- class = A_OR_D_REGS;
+ rclass = A_OR_D_REGS;
break;
case A_OR_S_REGS:
- class = A_REGS;
+ rclass = A_REGS;
break;
case D_OR_SP_OR_S_REGS:
- class = D_OR_SP_REGS;
+ rclass = D_OR_SP_REGS;
break;
case D_OR_Y_OR_S_REGS:
- class = D_OR_Y_REGS;
+ rclass = D_OR_Y_REGS;
break;
case D_OR_X_OR_S_REGS:
- class = D_OR_X_REGS;
+ rclass = D_OR_X_REGS;
break;
case SP_OR_S_REGS:
- class = SP_REGS;
+ rclass = SP_REGS;
break;
case Y_OR_S_REGS:
- class = Y_REGS;
+ rclass = Y_REGS;
break;
case X_OR_S_REGS:
- class = X_REGS;
+ rclass = X_REGS;
break;
case D_OR_S_REGS:
- class = D_REGS;
+ rclass = D_REGS;
}
}
- else if (class >= S_REGS)
+ else if (rclass >= S_REGS)
{
if (debug_m6811)
{
- printf ("Class = %s for: ", reg_class_names[class]);
+ printf ("Class = %s for: ", reg_class_names[rclass]);
fflush (stdout);
debug_rtx (operand);
}
if (debug_m6811)
{
- printf (" => class=%s\n", reg_class_names[class]);
+ printf (" => class=%s\n", reg_class_names[rclass]);
fflush (stdout);
debug_rtx (operand);
}
- return class;
+ return rclass;
}
/* Return 1 if the operand is a valid indexed addressing mode.
/* Cost of moving memory. */
int
-m68hc11_memory_move_cost (enum machine_mode mode, enum reg_class class,
+m68hc11_memory_move_cost (enum machine_mode mode, enum reg_class rclass,
int in ATTRIBUTE_UNUSED)
{
- if (class <= H_REGS && class > NO_REGS)
+ if (rclass <= H_REGS && rclass > NO_REGS)
{
if (GET_MODE_SIZE (mode) <= 2)
return COSTS_N_INSNS (1) + (reload_completed | reload_in_progress);
}
}
-/* Implement SECONDARY_RELOAD_CLASS. If CLASS contains r15, and we can't
+/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
directly move X into it, use r1-r14 as a temporary. */
enum reg_class
-mcore_secondary_reload_class (enum reg_class class,
+mcore_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
- if (TEST_HARD_REG_BIT (reg_class_contents[class], 15)
+ if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
&& !mcore_r15_operand_p (x))
return LRW_REGS;
return NO_REGS;
}
/* Return the reg_class to use when reloading the rtx X into the class
- CLASS. If X is too complex to move directly into r15, prefer to
+ RCLASS. If X is too complex to move directly into r15, prefer to
use LRW_REGS instead. */
enum reg_class
-mcore_reload_class (rtx x, enum reg_class class)
+mcore_reload_class (rtx x, enum reg_class rclass)
{
- if (reg_class_subset_p (LRW_REGS, class) && !mcore_r15_operand_p (x))
+ if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
return LRW_REGS;
- return class;
+ return rclass;
}
/* Tell me if a pair of reg/subreg rtx's actually refer to the same
mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
{
unsigned int size;
- enum mode_class class;
+ enum mode_class mclass;
if (mode == CCV2mode)
return (ISA_HAS_8CC
}
size = GET_MODE_SIZE (mode);
- class = GET_MODE_CLASS (mode);
+ mclass = GET_MODE_CLASS (mode);
if (GP_REG_P (regno))
return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
|| mode == DImode))
return true;
- if (class == MODE_FLOAT
- || class == MODE_COMPLEX_FLOAT
- || class == MODE_VECTOR_FLOAT)
+ if (mclass == MODE_FLOAT
+ || mclass == MODE_COMPLEX_FLOAT
+ || mclass == MODE_VECTOR_FLOAT)
return size <= UNITS_PER_FPVALUE;
/* Allow integer modes that fit into a single register. We need
to put integers into FPRs when using instructions like CVT
and TRUNC. There's no point allowing sizes smaller than a word,
because the FPU has no appropriate load/store instructions. */
- if (class == MODE_INT)
+ if (mclass == MODE_INT)
return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
}
}
if (ALL_COP_REG_P (regno))
- return class == MODE_INT && size <= UNITS_PER_WORD;
+ return mclass == MODE_INT && size <= UNITS_PER_WORD;
if (regno == GOT_VERSION_REGNUM)
return mode == SImode;
in mips_hard_regno_nregs. */
int
-mips_class_max_nregs (enum reg_class class, enum machine_mode mode)
+mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
int size;
HARD_REG_SET left;
size = 0x8000;
- COPY_HARD_REG_SET (left, reg_class_contents[(int) class]);
+ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
{
size = MIN (size, 4);
bool
mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
enum machine_mode to ATTRIBUTE_UNUSED,
- enum reg_class class)
+ enum reg_class rclass)
{
/* There are several problems with changing the modes of values
in floating-point registers:
not ask it to treat the value as having a different format.
We therefore disallow all mode changes involving FPRs. */
- return reg_classes_intersect_p (FP_REGS, class);
+ return reg_classes_intersect_p (FP_REGS, rclass);
}
/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
/* Implement PREFERRED_RELOAD_CLASS. */
enum reg_class
-mips_preferred_reload_class (rtx x, enum reg_class class)
+mips_preferred_reload_class (rtx x, enum reg_class rclass)
{
- if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
+ if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
return LEA_REGS;
- if (reg_class_subset_p (FP_REGS, class)
+ if (reg_class_subset_p (FP_REGS, rclass)
&& mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
return FP_REGS;
- if (reg_class_subset_p (GR_REGS, class))
- class = GR_REGS;
+ if (reg_class_subset_p (GR_REGS, rclass))
+ rclass = GR_REGS;
- if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
- class = M16_REGS;
+ if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
+ rclass = M16_REGS;
- return class;
+ return rclass;
}
/* Implement REGISTER_MOVE_COST. */
}
/* Return the register class required for a secondary register when
- copying between one of the registers in CLASS and value X, which
+ copying between one of the registers in RCLASS and value X, which
has mode MODE. X is the source of the move if IN_P, otherwise it
is the destination. Return NO_REGS if no secondary register is
needed. */
enum reg_class
-mips_secondary_reload_class (enum reg_class class,
+mips_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode, rtx x, bool in_p)
{
int regno;
/* If X is a constant that cannot be loaded into $25, it must be loaded
into some other GPR. No other register class allows a direct move. */
if (mips_dangerous_for_la25_p (x))
- return reg_class_subset_p (class, LEA_REGS) ? NO_REGS : LEA_REGS;
+ return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;
regno = true_regnum (x);
if (TARGET_MIPS16)
{
/* In MIPS16 mode, every move must involve a member of M16_REGS. */
- if (!reg_class_subset_p (class, M16_REGS) && !M16_REG_P (regno))
+ if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
return M16_REGS;
/* We can't really copy to HI or LO at all in MIPS16 mode. */
- if (in_p ? reg_classes_intersect_p (class, ACC_REGS) : ACC_REG_P (regno))
+ if (in_p ? reg_classes_intersect_p (rclass, ACC_REGS) : ACC_REG_P (regno))
return M16_REGS;
return NO_REGS;
/* Copying from accumulator registers to anywhere other than a general
register requires a temporary general register. */
- if (reg_class_subset_p (class, ACC_REGS))
+ if (reg_class_subset_p (rclass, ACC_REGS))
return GP_REG_P (regno) ? NO_REGS : GR_REGS;
if (ACC_REG_P (regno))
- return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
+ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
/* We can only copy a value to a condition code register from a
floating-point register, and even then we require a scratch
floating-point register. We can only copy a value out of a
condition-code register into a general register. */
- if (reg_class_subset_p (class, ST_REGS))
+ if (reg_class_subset_p (rclass, ST_REGS))
{
if (in_p)
return FP_REGS;
{
if (!in_p)
return FP_REGS;
- return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
+ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
}
- if (reg_class_subset_p (class, FP_REGS))
+ if (reg_class_subset_p (rclass, FP_REGS))
{
if (MEM_P (x)
&& (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
return GR_REGS;
}
if (FP_REG_P (regno))
- return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
+ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
return NO_REGS;
}
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this, temp1, temp2, insn, fnaddr;
+ rtx this_rtx, temp1, temp2, insn, fnaddr;
bool use_sibcall_p;
/* Pretend to be a post-reload pass while generating rtl. */
/* Find out which register contains the "this" pointer. */
if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
else
- this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
- /* Add DELTA to THIS. */
+ /* Add DELTA to THIS_RTX. */
if (delta != 0)
{
rtx offset = GEN_INT (delta);
mips_emit_move (temp1, offset);
offset = temp1;
}
- emit_insn (gen_add3_insn (this, this, offset));
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
}
- /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
if (vcall_offset != 0)
{
rtx addr;
- /* Set TEMP1 to *THIS. */
- mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
+ /* Set TEMP1 to *THIS_RTX. */
+ mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
- /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
addr = mips_add_offset (temp2, temp1, vcall_offset);
- /* Load the offset and add it to THIS. */
+ /* Load the offset and add it to THIS_RTX. */
mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
- emit_insn (gen_add3_insn (this, this, temp1));
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
}
/* Jump to the target function. Use a sibcall if direct jumps are
We need to extend the reload class of REMAINDER_REG and HIMULT_REG. */
enum reg_class
-mmix_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class class)
+mmix_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class rclass)
{
/* FIXME: Revisit. */
return GET_CODE (x) == MOD && GET_MODE (x) == DImode
- ? REMAINDER_REG : class;
+ ? REMAINDER_REG : rclass;
}
/* PREFERRED_OUTPUT_RELOAD_CLASS.
enum reg_class
mmix_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
- enum reg_class class)
+ enum reg_class rclass)
{
/* FIXME: Revisit. */
return GET_CODE (x) == MOD && GET_MODE (x) == DImode
- ? REMAINDER_REG : class;
+ ? REMAINDER_REG : rclass;
}
/* SECONDARY_RELOAD_CLASS.
We need to reload regs of REMAINDER_REG and HIMULT_REG elsewhere. */
enum reg_class
-mmix_secondary_reload_class (enum reg_class class,
+mmix_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x ATTRIBUTE_UNUSED,
int in_p ATTRIBUTE_UNUSED)
{
- if (class == REMAINDER_REG
- || class == HIMULT_REG
- || class == SYSTEM_REGS)
+ if (rclass == REMAINDER_REG
+ || rclass == HIMULT_REG
+ || rclass == SYSTEM_REGS)
return GENERAL_REGS;
return NO_REGS;
}
/* What (if any) secondary registers are needed to move IN with mode
- MODE into a register in register class CLASS.
+ MODE into a register in register class RCLASS.
We might be able to simplify this. */
enum reg_class
-mn10300_secondary_reload_class (enum reg_class class, enum machine_mode mode,
+mn10300_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
rtx in)
{
/* Memory loads less than a full word wide can't have an
&& GET_CODE (SUBREG_REG (in)) == REG
&& REGNO (SUBREG_REG (in)) >= FIRST_PSEUDO_REGISTER))
&& (mode == QImode || mode == HImode)
- && (class == ADDRESS_REGS || class == SP_REGS
- || class == SP_OR_ADDRESS_REGS))
+ && (rclass == ADDRESS_REGS || rclass == SP_REGS
+ || rclass == SP_OR_ADDRESS_REGS))
{
if (TARGET_AM33)
return DATA_OR_EXTENDED_REGS;
/* We can't directly load sp + const_int into a data register;
we must use an address register as an intermediate. */
- if (class != SP_REGS
- && class != ADDRESS_REGS
- && class != SP_OR_ADDRESS_REGS
- && class != SP_OR_EXTENDED_REGS
- && class != ADDRESS_OR_EXTENDED_REGS
- && class != SP_OR_ADDRESS_OR_EXTENDED_REGS
+ if (rclass != SP_REGS
+ && rclass != ADDRESS_REGS
+ && rclass != SP_OR_ADDRESS_REGS
+ && rclass != SP_OR_EXTENDED_REGS
+ && rclass != ADDRESS_OR_EXTENDED_REGS
+ && rclass != SP_OR_ADDRESS_OR_EXTENDED_REGS
&& (in == stack_pointer_rtx
|| (GET_CODE (in) == PLUS
&& (XEXP (in, 0) == stack_pointer_rtx
|| XEXP (in, 1) == stack_pointer_rtx))
return GENERAL_REGS;
- if (TARGET_AM33_2 && class == FP_REGS
+ if (TARGET_AM33_2 && rclass == FP_REGS
&& GET_CODE (in) == MEM
&& ! (GET_CODE (in) == MEM && !CONSTANT_ADDRESS_P (XEXP (in, 0))))
{
}
\f
static enum reg_class
-pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
+pa_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
enum machine_mode mode, secondary_reload_info *sri)
{
int is_symbolic, regno;
/* Handle the easy stuff first. */
- if (class == R1_REGS)
+ if (rclass == R1_REGS)
return NO_REGS;
if (REG_P (x))
{
regno = REGNO (x);
- if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
+ if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
return NO_REGS;
}
else
generation requires %r1 as a scratch register. */
if (flag_pic
&& (mode == SImode || mode == DImode)
- && FP_REG_CLASS_P (class)
+ && FP_REG_CLASS_P (rclass)
&& (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
{
sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
memory loads and stores. */
if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
&& GET_MODE_CLASS (mode) == MODE_INT
- && FP_REG_CLASS_P (class))
+ && FP_REG_CLASS_P (rclass))
{
/* Reload passes (mem:SI (reg/f:DI 30 %r30) when it wants to check
the secondary reload needed for a pseudo. It never passes a
/* We need a secondary register (GPR) for copies between the SAR
and anything other than a general register. */
- if (class == SHIFT_REGS && (regno <= 0 || regno >= 32))
+ if (rclass == SHIFT_REGS && (regno <= 0 || regno >= 32))
{
sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
return NO_REGS;
well as secondary memory. */
if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
&& (REGNO_REG_CLASS (regno) == SHIFT_REGS
- && FP_REG_CLASS_P (class)))
+ && FP_REG_CLASS_P (rclass)))
{
sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
return NO_REGS;
static void
pa_combine_instructions (void)
{
- rtx anchor, new;
+ rtx anchor, new_rtx;
/* This can get expensive since the basic algorithm is on the
order of O(n^2) (or worse). Only do it for -O2 or higher
may be combined with "floating" insns. As the name implies,
"anchor" instructions don't move, while "floating" insns may
move around. */
- new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
- new = make_insn_raw (new);
+ new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
+ new_rtx = make_insn_raw (new_rtx);
for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
{
{
/* If ANCHOR and FLOATER can be combined, then we're
done with this pass. */
- if (pa_can_combine_p (new, anchor, floater, 0,
+ if (pa_can_combine_p (new_rtx, anchor, floater, 0,
SET_DEST (PATTERN (floater)),
XEXP (SET_SRC (PATTERN (floater)), 0),
XEXP (SET_SRC (PATTERN (floater)), 1)))
{
if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
{
- if (pa_can_combine_p (new, anchor, floater, 0,
+ if (pa_can_combine_p (new_rtx, anchor, floater, 0,
SET_DEST (PATTERN (floater)),
XEXP (SET_SRC (PATTERN (floater)), 0),
XEXP (SET_SRC (PATTERN (floater)), 1)))
}
else
{
- if (pa_can_combine_p (new, anchor, floater, 0,
+ if (pa_can_combine_p (new_rtx, anchor, floater, 0,
SET_DEST (PATTERN (floater)),
SET_SRC (PATTERN (floater)),
SET_SRC (PATTERN (floater))))
{
/* If ANCHOR and FLOATER can be combined, then we're
done with this pass. */
- if (pa_can_combine_p (new, anchor, floater, 1,
+ if (pa_can_combine_p (new_rtx, anchor, floater, 1,
SET_DEST (PATTERN (floater)),
XEXP (SET_SRC (PATTERN (floater)),
0),
}
static int
-pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
+pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
rtx src1, rtx src2)
{
int insn_code_number;
If the pattern doesn't match or the constraints
aren't met keep searching for a suitable floater
insn. */
- XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
- XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
- INSN_CODE (new) = -1;
- insn_code_number = recog_memoized (new);
+ XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
+ XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
+ INSN_CODE (new_rtx) = -1;
+ insn_code_number = recog_memoized (new_rtx);
if (insn_code_number < 0
- || (extract_insn (new), ! constrain_operands (1)))
+ || (extract_insn (new_rtx), ! constrain_operands (1)))
return 0;
if (reversed)
#endif
/* Return true if a change from mode FROM to mode TO for a register
- in register class CLASS is invalid. */
+ in register class RCLASS is invalid. */
bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
- enum reg_class class)
+ enum reg_class rclass)
{
if (from == to)
return false;
On the 64-bit target, this conflicts with the definition of
LOAD_EXTEND_OP. Thus, we can't allow changing between modes
with different sizes in the floating-point registers. */
- if (MAYBE_FP_REG_CLASS_P (class))
+ if (MAYBE_FP_REG_CLASS_P (rclass))
return true;
/* HARD_REGNO_MODE_OK places modes with sizes larger than a word
#define LEGITIMIZE_RELOAD_ADDRESS(AD, MODE, OPNUM, TYPE, IND, WIN) \
do { \
long offset, newoffset, mask; \
- rtx new, temp = NULL_RTX; \
+ rtx new_rtx, temp = NULL_RTX; \
\
mask = (GET_MODE_CLASS (MODE) == MODE_FLOAT \
? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff); \
temp = simplify_binary_operation (PLUS, Pmode, \
XEXP (AD, 0), XEXP (AD, 1)); \
\
- new = temp ? temp : AD; \
+ new_rtx = temp ? temp : AD; \
\
if (optimize \
- && GET_CODE (new) == PLUS \
- && GET_CODE (XEXP (new, 0)) == REG \
- && GET_CODE (XEXP (new, 1)) == CONST_INT) \
+ && GET_CODE (new_rtx) == PLUS \
+ && GET_CODE (XEXP (new_rtx, 0)) == REG \
+ && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT) \
{ \
- offset = INTVAL (XEXP ((new), 1)); \
+ offset = INTVAL (XEXP ((new_rtx), 1)); \
\
/* Choose rounding direction. Round up if we are >= halfway. */ \
if ((offset & mask) >= ((mask + 1) / 2)) \
\
if (newoffset != 0 && VAL_14_BITS_P (newoffset)) \
{ \
- temp = gen_rtx_PLUS (Pmode, XEXP (new, 0), \
+ temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0), \
GEN_INT (newoffset)); \
AD = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));\
push_reload (XEXP (AD, 0), 0, &XEXP (AD, 0), 0, \
enum machine_mode mode = GET_MODE (target);
int n_elts = GET_MODE_NUNITS (mode);
int n_var = 0;
- rtx x, new, tmp, constant_op, op1, op2;
+ rtx x, new_rtx, tmp, constant_op, op1, op2;
int i;
for (i = 0; i < n_elts; ++i)
if (n_var == 2)
{
/* The vector is initialized only with non-constants. */
- new = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
+ new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
XVECEXP (vals, 0, 1));
- emit_move_insn (target, new);
+ emit_move_insn (target, new_rtx);
return;
}
emit_move_insn (tmp, constant_op);
if (CONSTANT_P (op1))
- new = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
+ new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
else
- new = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
+ new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
- emit_move_insn (target, new);
+ emit_move_insn (target, new_rtx);
}
void
}
/* Return the register class of a scratch register needed to copy IN into
- or out of a register in CLASS in MODE. If it can be done directly,
+ or out of a register in RCLASS in MODE. If it can be done directly,
NO_REGS is returned. */
enum reg_class
-rs6000_secondary_reload_class (enum reg_class class,
+rs6000_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx in)
{
On Darwin, pic addresses require a load from memory, which
needs a base register. */
- if (class != BASE_REGS
+ if (rclass != BASE_REGS
&& (GET_CODE (in) == SYMBOL_REF
|| GET_CODE (in) == HIGH
|| GET_CODE (in) == LABEL_REF
/* We can place anything into GENERAL_REGS and can put GENERAL_REGS
into anything. */
- if (class == GENERAL_REGS || class == BASE_REGS
+ if (rclass == GENERAL_REGS || rclass == BASE_REGS
|| (regno >= 0 && INT_REGNO_P (regno)))
return NO_REGS;
/* Constants, memory, and FP registers can go into FP registers. */
if ((regno == -1 || FP_REGNO_P (regno))
- && (class == FLOAT_REGS || class == NON_SPECIAL_REGS))
+ && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
/* Memory, and AltiVec registers can go into AltiVec registers. */
if ((regno == -1 || ALTIVEC_REGNO_P (regno))
- && class == ALTIVEC_REGS)
+ && rclass == ALTIVEC_REGS)
return NO_REGS;
/* We can copy among the CR registers. */
- if ((class == CR_REGS || class == CR0_REGS)
+ if ((rclass == CR_REGS || rclass == CR0_REGS)
&& regno >= 0 && CR_REGNO_P (regno))
return NO_REGS;
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this, insn, funexp;
+ rtx this_rtx, insn, funexp;
reload_completed = 1;
epilogue_completed = 1;
/* Find the "this" pointer. If the function returns a structure,
the structure return pointer is in r3. */
if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, 4);
+ this_rtx = gen_rtx_REG (Pmode, 4);
else
- this = gen_rtx_REG (Pmode, 3);
+ this_rtx = gen_rtx_REG (Pmode, 3);
/* Apply the constant offset, if required. */
if (delta)
{
rtx delta_rtx = GEN_INT (delta);
emit_insn (TARGET_32BIT
- ? gen_addsi3 (this, this, delta_rtx)
- : gen_adddi3 (this, this, delta_rtx));
+ ? gen_addsi3 (this_rtx, this_rtx, delta_rtx)
+ : gen_adddi3 (this_rtx, this_rtx, delta_rtx));
}
/* Apply the offset from the vtable, if required. */
rtx vcall_offset_rtx = GEN_INT (vcall_offset);
rtx tmp = gen_rtx_REG (Pmode, 12);
- emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
{
emit_insn (TARGET_32BIT
emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
}
emit_insn (TARGET_32BIT
- ? gen_addsi3 (this, this, tmp)
- : gen_adddi3 (this, this, tmp));
+ ? gen_addsi3 (this_rtx, this_rtx, tmp)
+ : gen_adddi3 (this_rtx, this_rtx, tmp));
}
/* Generate a tail call to the target function. */
}
static inline bool
-compare_section_name (const char *section, const char *template)
+compare_section_name (const char *section, const char *templ)
{
int len;
- len = strlen (template);
- return (strncmp (section, template, len) == 0
+ len = strlen (templ);
+ return (strncmp (section, templ, len) == 0
&& (section[len] == 0 || section[len] == '.'));
}
or from memory. */
int
-rs6000_memory_move_cost (enum machine_mode mode, enum reg_class class,
+rs6000_memory_move_cost (enum machine_mode mode, enum reg_class rclass,
int in ATTRIBUTE_UNUSED)
{
- if (reg_classes_intersect_p (class, GENERAL_REGS))
+ if (reg_classes_intersect_p (rclass, GENERAL_REGS))
return 4 * hard_regno_nregs[0][mode];
- else if (reg_classes_intersect_p (class, FLOAT_REGS))
+ else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
return 4 * hard_regno_nregs[32][mode];
- else if (reg_classes_intersect_p (class, ALTIVEC_REGS))
+ else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
return 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
else
- return 4 + rs6000_register_move_cost (mode, class, GENERAL_REGS);
+ return 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
}
/* Returns a code for a target-specific builtin that implements
return ret;
}
-/* Emit a SImode compare and swap instruction setting MEM to NEW if OLD
+/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
matches CMP.
Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
conditional branch testing the result. */
static rtx
-s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new)
+s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
{
rtx ret;
- emit_insn (gen_sync_compare_and_swap_ccsi (old, mem, cmp, new));
+ emit_insn (gen_sync_compare_and_swap_ccsi (old, mem, cmp, new_rtx));
ret = gen_rtx_fmt_ee (code, VOIDmode, s390_compare_emitted, const0_rtx);
s390_compare_emitted = NULL_RTX;
return false;
}
-/* Given an rtx OP being reloaded into a reg required to be in class CLASS,
+/* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
return the class of reg to actually use. */
enum reg_class
-s390_preferred_reload_class (rtx op, enum reg_class class)
+s390_preferred_reload_class (rtx op, enum reg_class rclass)
{
switch (GET_CODE (op))
{
case CONST_DOUBLE:
case CONST_INT:
if (legitimate_reload_constant_p (op))
- return class;
+ return rclass;
else
return NO_REGS;
case LABEL_REF:
case SYMBOL_REF:
case CONST:
- if (reg_class_subset_p (ADDR_REGS, class))
+ if (reg_class_subset_p (ADDR_REGS, rclass))
return ADDR_REGS;
else
return NO_REGS;
break;
}
- return class;
+ return rclass;
}
/* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
}
/* Inform reload about cases where moving X with a mode MODE to a register in
- CLASS requires an extra scratch or immediate register. Return the class
+ RCLASS requires an extra scratch or immediate register. Return the class
needed for the immediate register. */
static enum reg_class
-s390_secondary_reload (bool in_p, rtx x, enum reg_class class,
+s390_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
enum machine_mode mode, secondary_reload_info *sri)
{
/* Intermediate register needed. */
- if (reg_classes_intersect_p (CC_REGS, class))
+ if (reg_classes_intersect_p (CC_REGS, rclass))
return GENERAL_REGS;
if (TARGET_Z10)
/* For GENERAL_REGS a displacement overflow is no problem if occurring
in a s_operand address since we may fallback to lm/stm. So we only
have to care about overflows in the b+i+d case. */
- if ((reg_classes_intersect_p (GENERAL_REGS, class)
+ if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
&& s390_class_max_nregs (GENERAL_REGS, mode) > 1
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
/* For FP_REGS no lm/stm is available so this check is triggered
for displacement overflows in b+i+d and b+d like addresses. */
- || (reg_classes_intersect_p (FP_REGS, class)
+ || (reg_classes_intersect_p (FP_REGS, rclass)
&& s390_class_max_nregs (FP_REGS, mode) > 1))
{
if (in_p)
/* A scratch address register is needed when a symbolic constant is
copied to r0 compiling with -fPIC. In other cases the target
register might be used as temporary (see legitimize_pic_address). */
- if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && class != ADDR_REGS)
+ if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
sri->icode = (TARGET_64BIT ?
CODE_FOR_reloaddi_PIC_addr :
CODE_FOR_reloadsi_PIC_addr);
legitimize_pic_address (rtx orig, rtx reg)
{
rtx addr = orig;
- rtx new = orig;
+ rtx new_rtx = orig;
rtx base;
gcc_assert (!TLS_SYMBOLIC_CONST (addr));
addr = force_const_mem (Pmode, addr);
emit_move_insn (temp, addr);
- new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
if (reg != 0)
{
- s390_load_address (reg, new);
- new = reg;
+ s390_load_address (reg, new_rtx);
+ new_rtx = reg;
}
}
}
if (reload_in_progress || reload_completed)
df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
- new = gen_rtx_CONST (Pmode, new);
- new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
- new = gen_const_mem (Pmode, new);
- emit_move_insn (reg, new);
- new = reg;
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
+ new_rtx = gen_const_mem (Pmode, new_rtx);
+ emit_move_insn (reg, new_rtx);
+ new_rtx = reg;
}
else if (TARGET_CPU_ZARCH)
{
gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
- new = gen_rtx_CONST (Pmode, new);
- emit_move_insn (temp, new);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ emit_move_insn (temp, new_rtx);
- new = gen_const_mem (Pmode, temp);
- emit_move_insn (reg, new);
- new = reg;
+ new_rtx = gen_const_mem (Pmode, temp);
+ emit_move_insn (reg, new_rtx);
+ new_rtx = reg;
}
else
{
addr = force_const_mem (Pmode, addr);
emit_move_insn (temp, addr);
- new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
- new = gen_const_mem (Pmode, new);
- emit_move_insn (reg, new);
- new = reg;
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
+ new_rtx = gen_const_mem (Pmode, new_rtx);
+ emit_move_insn (reg, new_rtx);
+ new_rtx = reg;
}
}
else
out of the literal pool, force them back in. */
case UNSPEC_GOTOFF:
case UNSPEC_PLTOFF:
- new = force_const_mem (Pmode, orig);
+ new_rtx = force_const_mem (Pmode, orig);
break;
/* @GOT is OK as is if small. */
case UNSPEC_GOT:
if (flag_pic == 2)
- new = force_const_mem (Pmode, orig);
+ new_rtx = force_const_mem (Pmode, orig);
break;
/* @GOTENT is OK as is. */
addr = force_const_mem (Pmode, addr);
emit_move_insn (temp, addr);
- new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
if (reg != 0)
{
- s390_load_address (reg, new);
- new = reg;
+ s390_load_address (reg, new_rtx);
+ new_rtx = reg;
}
}
break;
}
emit_move_insn (temp, op0);
- new = gen_rtx_PLUS (Pmode, temp, op1);
+ new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
if (reg != 0)
{
- s390_load_address (reg, new);
- new = reg;
+ s390_load_address (reg, new_rtx);
+ new_rtx = reg;
}
}
else
addr = force_const_mem (Pmode, addr);
emit_move_insn (temp, addr);
- new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
if (reg != 0)
{
- s390_load_address (reg, new);
- new = reg;
+ s390_load_address (reg, new_rtx);
+ new_rtx = reg;
}
}
}
{
gcc_assert (XVECLEN (op0, 0) == 1);
- new = force_const_mem (Pmode, orig);
+ new_rtx = force_const_mem (Pmode, orig);
}
/* Otherwise, compute the sum. */
else
{
base = legitimize_pic_address (XEXP (addr, 0), reg);
- new = legitimize_pic_address (XEXP (addr, 1),
+ new_rtx = legitimize_pic_address (XEXP (addr, 1),
base == reg ? NULL_RTX : reg);
- if (GET_CODE (new) == CONST_INT)
- new = plus_constant (base, INTVAL (new));
+ if (GET_CODE (new_rtx) == CONST_INT)
+ new_rtx = plus_constant (base, INTVAL (new_rtx));
else
{
- if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
+ if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
{
- base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
- new = XEXP (new, 1);
+ base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
+ new_rtx = XEXP (new_rtx, 1);
}
- new = gen_rtx_PLUS (Pmode, base, new);
+ new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
}
- if (GET_CODE (new) == CONST)
- new = XEXP (new, 0);
- new = force_operand (new, 0);
+ if (GET_CODE (new_rtx) == CONST)
+ new_rtx = XEXP (new_rtx, 0);
+ new_rtx = force_operand (new_rtx, 0);
}
}
}
- return new;
+ return new_rtx;
}
/* Load the thread pointer into a register. */
static rtx
legitimize_tls_address (rtx addr, rtx reg)
{
- rtx new, tls_call, temp, base, r2, insn;
+ rtx new_rtx, tls_call, temp, base, r2, insn;
if (GET_CODE (addr) == SYMBOL_REF)
switch (tls_symbolic_operand (addr))
start_sequence ();
r2 = gen_rtx_REG (Pmode, 2);
tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
- new = gen_rtx_CONST (Pmode, tls_call);
- new = force_const_mem (Pmode, new);
- emit_move_insn (r2, new);
+ new_rtx = gen_rtx_CONST (Pmode, tls_call);
+ new_rtx = force_const_mem (Pmode, new_rtx);
+ emit_move_insn (r2, new_rtx);
s390_emit_tls_call_insn (r2, tls_call);
insn = get_insns ();
end_sequence ();
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
temp = gen_reg_rtx (Pmode);
- emit_libcall_block (insn, temp, r2, new);
+ emit_libcall_block (insn, temp, r2, new_rtx);
- new = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
+ new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
if (reg != 0)
{
- s390_load_address (reg, new);
- new = reg;
+ s390_load_address (reg, new_rtx);
+ new_rtx = reg;
}
break;
start_sequence ();
r2 = gen_rtx_REG (Pmode, 2);
tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
- new = gen_rtx_CONST (Pmode, tls_call);
- new = force_const_mem (Pmode, new);
- emit_move_insn (r2, new);
+ new_rtx = gen_rtx_CONST (Pmode, tls_call);
+ new_rtx = force_const_mem (Pmode, new_rtx);
+ emit_move_insn (r2, new_rtx);
s390_emit_tls_call_insn (r2, tls_call);
insn = get_insns ();
end_sequence ();
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
temp = gen_reg_rtx (Pmode);
- emit_libcall_block (insn, temp, r2, new);
+ emit_libcall_block (insn, temp, r2, new_rtx);
- new = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
+ new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
base = gen_reg_rtx (Pmode);
- s390_load_address (base, new);
+ s390_load_address (base, new_rtx);
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
- new = gen_rtx_CONST (Pmode, new);
- new = force_const_mem (Pmode, new);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = force_const_mem (Pmode, new_rtx);
temp = gen_reg_rtx (Pmode);
- emit_move_insn (temp, new);
+ emit_move_insn (temp, new_rtx);
- new = gen_rtx_PLUS (Pmode, base, temp);
+ new_rtx = gen_rtx_PLUS (Pmode, base, temp);
if (reg != 0)
{
- s390_load_address (reg, new);
- new = reg;
+ s390_load_address (reg, new_rtx);
+ new_rtx = reg;
}
break;
if (reload_in_progress || reload_completed)
df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
- new = gen_rtx_CONST (Pmode, new);
- new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
- new = gen_const_mem (Pmode, new);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
+ new_rtx = gen_const_mem (Pmode, new_rtx);
temp = gen_reg_rtx (Pmode);
- emit_move_insn (temp, new);
+ emit_move_insn (temp, new_rtx);
}
else if (TARGET_CPU_ZARCH)
{
/* If the GOT offset might be >= 4k, we determine the position
of the GOT entry via a PC-relative LARL. */
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
- new = gen_rtx_CONST (Pmode, new);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
temp = gen_reg_rtx (Pmode);
- emit_move_insn (temp, new);
+ emit_move_insn (temp, new_rtx);
- new = gen_const_mem (Pmode, temp);
+ new_rtx = gen_const_mem (Pmode, temp);
temp = gen_reg_rtx (Pmode);
- emit_move_insn (temp, new);
+ emit_move_insn (temp, new_rtx);
}
else if (flag_pic)
{
if (reload_in_progress || reload_completed)
df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
- new = gen_rtx_CONST (Pmode, new);
- new = force_const_mem (Pmode, new);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = force_const_mem (Pmode, new_rtx);
temp = gen_reg_rtx (Pmode);
- emit_move_insn (temp, new);
+ emit_move_insn (temp, new_rtx);
- new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
- new = gen_const_mem (Pmode, new);
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
+ new_rtx = gen_const_mem (Pmode, new_rtx);
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new, addr), UNSPEC_TLS_LOAD);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
temp = gen_reg_rtx (Pmode);
- emit_insn (gen_rtx_SET (Pmode, temp, new));
+ emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
}
else
{
/* In position-dependent code, load the absolute address of
the GOT entry from the literal pool. */
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
- new = gen_rtx_CONST (Pmode, new);
- new = force_const_mem (Pmode, new);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = force_const_mem (Pmode, new_rtx);
temp = gen_reg_rtx (Pmode);
- emit_move_insn (temp, new);
+ emit_move_insn (temp, new_rtx);
- new = temp;
- new = gen_const_mem (Pmode, new);
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new, addr), UNSPEC_TLS_LOAD);
+ new_rtx = temp;
+ new_rtx = gen_const_mem (Pmode, new_rtx);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
temp = gen_reg_rtx (Pmode);
- emit_insn (gen_rtx_SET (Pmode, temp, new));
+ emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
}
- new = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
+ new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
if (reg != 0)
{
- s390_load_address (reg, new);
- new = reg;
+ s390_load_address (reg, new_rtx);
+ new_rtx = reg;
}
break;
case TLS_MODEL_LOCAL_EXEC:
- new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
- new = gen_rtx_CONST (Pmode, new);
- new = force_const_mem (Pmode, new);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = force_const_mem (Pmode, new_rtx);
temp = gen_reg_rtx (Pmode);
- emit_move_insn (temp, new);
+ emit_move_insn (temp, new_rtx);
- new = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
+ new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
if (reg != 0)
{
- s390_load_address (reg, new);
- new = reg;
+ s390_load_address (reg, new_rtx);
+ new_rtx = reg;
}
break;
{
case UNSPEC_INDNTPOFF:
gcc_assert (TARGET_CPU_ZARCH);
- new = addr;
+ new_rtx = addr;
break;
default:
else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
{
- new = XEXP (XEXP (addr, 0), 0);
- if (GET_CODE (new) != SYMBOL_REF)
- new = gen_rtx_CONST (Pmode, new);
+ new_rtx = XEXP (XEXP (addr, 0), 0);
+ if (GET_CODE (new_rtx) != SYMBOL_REF)
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
- new = legitimize_tls_address (new, reg);
- new = plus_constant (new, INTVAL (XEXP (XEXP (addr, 0), 1)));
- new = force_operand (new, 0);
+ new_rtx = legitimize_tls_address (new_rtx, reg);
+ new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
+ new_rtx = force_operand (new_rtx, 0);
}
else
gcc_unreachable (); /* for now ... */
- return new;
+ return new_rtx;
}
/* Emit insns making the address in operands[1] valid for a standard
{
HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
- rtx cst, tem, new;
+ rtx cst, tem, new_rtx;
cst = GEN_INT (upper);
if (!legitimate_reload_constant_p (cst))
cst = force_const_mem (Pmode, cst);
tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
- new = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
+ new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
opnum, (enum reload_type) type);
- return new;
+ return new_rtx;
}
return NULL_RTX;
}
/* Expand an atomic compare and swap operation for HImode and QImode. MEM is
- the memory location, CMP the old value to compare MEM with and NEW the value
+ the memory location, CMP the old value to compare MEM with and NEW_RTX the value
to set if CMP == MEM.
CMP is never in memory for compare_and_swap_cc because
expand_bool_compare_and_swap puts it into a register for later compare. */
void
-s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new)
+s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
{
struct alignment_context ac;
rtx cmpv, newv, val, resv, cc;
/* Shift the values to the correct bit positions. */
if (!(ac.aligned && MEM_P (cmp)))
cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
- if (!(ac.aligned && MEM_P (new)))
- new = s390_expand_mask_and_shift (new, mode, ac.shift);
+ if (!(ac.aligned && MEM_P (new_rtx)))
+ new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
/* Load full word. Subsequent loads are performed by CS. */
val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
else
cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
NULL_RTX, 1, OPTAB_DIRECT));
- if (ac.aligned && MEM_P (new))
+ if (ac.aligned && MEM_P (new_rtx))
{
newv = force_reg (SImode, val);
- store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new);
+ store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
}
else
- newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new, val,
+ newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
NULL_RTX, 1, OPTAB_DIRECT));
/* Jump to end if we're done (likely?). */
{
struct alignment_context ac;
rtx cmp;
- rtx new = gen_reg_rtx (SImode);
+ rtx new_rtx = gen_reg_rtx (SImode);
rtx orig = gen_reg_rtx (SImode);
rtx csloop = gen_label_rtx ();
/* Start CS loop. */
emit_label (csloop);
- emit_move_insn (new, cmp);
+ emit_move_insn (new_rtx, cmp);
/* Patch new with val at correct position. */
switch (code)
{
case PLUS:
case MINUS:
- val = expand_simple_binop (SImode, code, new, orig,
+ val = expand_simple_binop (SImode, code, new_rtx, orig,
NULL_RTX, 1, OPTAB_DIRECT);
val = expand_simple_binop (SImode, AND, val, ac.modemask,
NULL_RTX, 1, OPTAB_DIRECT);
/* FALLTHRU */
case SET:
if (ac.aligned && MEM_P (val))
- store_bit_field (new, GET_MODE_BITSIZE (mode), 0, SImode, val);
+ store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
else
{
- new = expand_simple_binop (SImode, AND, new, ac.modemaski,
+ new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
NULL_RTX, 1, OPTAB_DIRECT);
- new = expand_simple_binop (SImode, IOR, new, val,
+ new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
NULL_RTX, 1, OPTAB_DIRECT);
}
break;
case AND:
case IOR:
case XOR:
- new = expand_simple_binop (SImode, code, new, val,
+ new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
NULL_RTX, 1, OPTAB_DIRECT);
break;
case MULT: /* NAND */
- new = expand_simple_binop (SImode, XOR, new, ac.modemask,
+ new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
NULL_RTX, 1, OPTAB_DIRECT);
- new = expand_simple_binop (SImode, AND, new, val,
+ new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
NULL_RTX, 1, OPTAB_DIRECT);
break;
default:
}
s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
- ac.memsi, cmp, new));
+ ac.memsi, cmp, new_rtx));
/* Return the correct part of the bitfield. */
if (target)
convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
- after ? new : cmp, ac.shift,
+ after ? new_rtx : cmp, ac.shift,
NULL_RTX, 1, OPTAB_DIRECT), 1);
}
}
/* Maximum number of registers to represent a value of mode MODE
- in a register of class CLASS. */
+ in a register of class RCLASS. */
bool
-s390_class_max_nregs (enum reg_class class, enum machine_mode mode)
+s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
- switch (class)
+ switch (rclass)
{
case FP_REGS:
if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
macro is used in only one place: `find_reloads_address' in reload.c. */
#define LEGITIMIZE_RELOAD_ADDRESS(AD, MODE, OPNUM, TYPE, IND, WIN) \
do { \
- rtx new = legitimize_reload_address (AD, MODE, OPNUM, (int)(TYPE)); \
- if (new) \
+ rtx new_rtx = legitimize_reload_address (AD, MODE, OPNUM, (int)(TYPE)); \
+ if (new_rtx) \
{ \
- (AD) = new; \
+ (AD) = new_rtx; \
goto WIN; \
} \
} while (0)
(match_operand:P 2 "register_operand" "=a")])]
""
{
- rtx new = legitimize_pic_address (operands[1], operands[2]);
- emit_move_insn (operands[0], new);
+ rtx new_rtx = legitimize_pic_address (operands[1], operands[2]);
+ emit_move_insn (operands[0], new_rtx);
})
;
const char *directive, const char *fmt, ...);
extern int score_output_external (FILE *file, tree decl, const char *name);
extern void score_override_options (void);
-extern enum reg_class score_secondary_reload_class (enum reg_class class,
+extern enum reg_class score_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode,
rtx x);
extern rtx score_function_value (tree valtype, tree func,
enum machine_mode mode);
extern enum reg_class score_preferred_reload_class (rtx x,
- enum reg_class class);
+ enum reg_class rclass);
extern HOST_WIDE_INT score_initial_elimination_offset (int from, int to);
extern void score_print_operand (FILE *file, rtx op, int letter);
extern void score_print_operand_address (FILE *file, rtx addr);
/* Implement PREFERRED_RELOAD_CLASS macro. */
enum reg_class
-score_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class class)
+score_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class rclass)
{
if (TARGET_SCORE5 || TARGET_SCORE5U || TARGET_SCORE7 || TARGET_SCORE7D)
- return score7_preferred_reload_class (x, class);
+ return score7_preferred_reload_class (x, rclass);
else if (TARGET_SCORE3)
- return score3_preferred_reload_class (x, class);
+ return score3_preferred_reload_class (x, rclass);
gcc_unreachable ();
}
/* Implement SECONDARY_INPUT_RELOAD_CLASS
and SECONDARY_OUTPUT_RELOAD_CLASS macro. */
enum reg_class
-score_secondary_reload_class (enum reg_class class,
+score_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x)
{
if (TARGET_SCORE5 || TARGET_SCORE5U || TARGET_SCORE7 || TARGET_SCORE7D)
- return score7_secondary_reload_class (class, mode, x);
+ return score7_secondary_reload_class (rclass, mode, x);
else if (TARGET_SCORE3)
- return score3_secondary_reload_class (class, mode, x);
+ return score3_secondary_reload_class (rclass, mode, x);
gcc_unreachable ();
}
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this, temp1, insn, fnaddr;
+ rtx this_rtx, temp1, insn, fnaddr;
/* Pretend to be a post-reload pass while generating rtl. */
reload_completed = 1;
/* Find out which register contains the "this" pointer. */
if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, ARG_REG_FIRST + 1);
+ this_rtx = gen_rtx_REG (Pmode, ARG_REG_FIRST + 1);
else
- this = gen_rtx_REG (Pmode, ARG_REG_FIRST);
+ this_rtx = gen_rtx_REG (Pmode, ARG_REG_FIRST);
- /* Add DELTA to THIS. */
+ /* Add DELTA to THIS_RTX. */
if (delta != 0)
{
rtx offset = GEN_INT (delta);
emit_move_insn (temp1, offset);
offset = temp1;
}
- emit_insn (gen_add3_insn (this, this, offset));
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
}
- /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
if (vcall_offset != 0)
{
rtx addr;
- /* Set TEMP1 to *THIS. */
- emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
+ /* Set TEMP1 to *THIS_RTX. */
+ emit_move_insn (temp1, gen_rtx_MEM (Pmode, this_rtx));
- /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
addr = score3_add_offset (temp1, vcall_offset);
- /* Load the offset and add it to THIS. */
+ /* Load the offset and add it to THIS_RTX. */
emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
- emit_insn (gen_add3_insn (this, this, temp1));
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
}
/* Jump to the target function. */
/* Implement PREFERRED_RELOAD_CLASS macro. */
enum reg_class
-score3_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class class)
+score3_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class rclass)
{
- if (reg_class_subset_p (G16_REGS, class))
+ if (reg_class_subset_p (G16_REGS, rclass))
return G16_REGS;
- if (reg_class_subset_p (G32_REGS, class))
+ if (reg_class_subset_p (G32_REGS, rclass))
return G32_REGS;
- return class;
+ return rclass;
}
/* Implement SECONDARY_INPUT_RELOAD_CLASS
and SECONDARY_OUTPUT_RELOAD_CLASS macro. */
enum reg_class
-score3_secondary_reload_class (enum reg_class class,
+score3_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x)
{
if (GET_CODE (x) == REG || GET_CODE(x) == SUBREG)
regno = true_regnum (x);
- if (!GR_REG_CLASS_P (class))
+ if (!GR_REG_CLASS_P (rclass))
return GP_REG_P (regno) ? NO_REGS : G32_REGS;
return NO_REGS;
}
score3_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
int size = GET_MODE_SIZE (mode);
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum mode_class mclass = GET_MODE_CLASS (mode);
- if (class == MODE_CC)
+ if (mclass == MODE_CC)
return regno == CC_REGNUM;
else if (regno == FRAME_POINTER_REGNUM
|| regno == ARG_POINTER_REGNUM)
- return class == MODE_INT;
+ return mclass == MODE_INT;
else if (GP_REG_P (regno))
return !(regno & 1) || (size <= UNITS_PER_WORD);
else if (CE_REG_P (regno))
- return (class == MODE_INT
+ return (mclass == MODE_INT
&& ((size <= UNITS_PER_WORD)
|| (regno == CE_REG_FIRST && size == 2 * UNITS_PER_WORD)));
else
- return (class == MODE_INT) && (size <= UNITS_PER_WORD);
+ return (mclass == MODE_INT) && (size <= UNITS_PER_WORD);
}
/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
extern void score3_override_options (void);
extern int score3_reg_class (int regno);
extern enum reg_class score3_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
- enum reg_class class);
+ enum reg_class rclass);
extern enum reg_class
-score3_secondary_reload_class (enum reg_class class,
+score3_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x);
extern int score3_const_ok_for_letter_p (HOST_WIDE_INT value, char c);
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this, temp1, insn, fnaddr;
+ rtx this_rtx, temp1, insn, fnaddr;
/* Pretend to be a post-reload pass while generating rtl. */
reload_completed = 1;
/* Find out which register contains the "this" pointer. */
if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, ARG_REG_FIRST + 1);
+ this_rtx = gen_rtx_REG (Pmode, ARG_REG_FIRST + 1);
else
- this = gen_rtx_REG (Pmode, ARG_REG_FIRST);
+ this_rtx = gen_rtx_REG (Pmode, ARG_REG_FIRST);
- /* Add DELTA to THIS. */
+ /* Add DELTA to THIS_RTX. */
if (delta != 0)
{
rtx offset = GEN_INT (delta);
emit_move_insn (temp1, offset);
offset = temp1;
}
- emit_insn (gen_add3_insn (this, this, offset));
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
}
- /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
if (vcall_offset != 0)
{
rtx addr;
- /* Set TEMP1 to *THIS. */
- emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
+ /* Set TEMP1 to *THIS_RTX. */
+ emit_move_insn (temp1, gen_rtx_MEM (Pmode, this_rtx));
- /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
addr = score7_add_offset (temp1, vcall_offset);
- /* Load the offset and add it to THIS. */
+ /* Load the offset and add it to THIS_RTX. */
emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
- emit_insn (gen_add3_insn (this, this, temp1));
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
}
/* Jump to the target function. */
/* Implement PREFERRED_RELOAD_CLASS macro. */
enum reg_class
-score7_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class class)
+score7_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, enum reg_class rclass)
{
- if (reg_class_subset_p (G16_REGS, class))
+ if (reg_class_subset_p (G16_REGS, rclass))
return G16_REGS;
- if (reg_class_subset_p (G32_REGS, class))
+ if (reg_class_subset_p (G32_REGS, rclass))
return G32_REGS;
- return class;
+ return rclass;
}
/* Implement SECONDARY_INPUT_RELOAD_CLASS
and SECONDARY_OUTPUT_RELOAD_CLASS macro. */
enum reg_class
-score7_secondary_reload_class (enum reg_class class,
+score7_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x)
{
if (GET_CODE (x) == REG || GET_CODE(x) == SUBREG)
regno = true_regnum (x);
- if (!GR_REG_CLASS_P (class))
+ if (!GR_REG_CLASS_P (rclass))
return GP_REG_P (regno) ? NO_REGS : G32_REGS;
return NO_REGS;
}
score7_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
int size = GET_MODE_SIZE (mode);
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum mode_class mclass = GET_MODE_CLASS (mode);
- if (class == MODE_CC)
+ if (mclass == MODE_CC)
return regno == CC_REGNUM;
else if (regno == FRAME_POINTER_REGNUM
|| regno == ARG_POINTER_REGNUM)
- return class == MODE_INT;
+ return mclass == MODE_INT;
else if (GP_REG_P (regno))
/* ((regno <= (GP_REG_LAST- HARD_REGNO_NREGS (dummy, mode)) + 1) */
return !(regno & 1) || (size <= UNITS_PER_WORD);
else if (CE_REG_P (regno))
- return (class == MODE_INT
+ return (mclass == MODE_INT
&& ((size <= UNITS_PER_WORD)
|| (regno == CE_REG_FIRST && size == 2 * UNITS_PER_WORD)));
else
- return (class == MODE_INT) && (size <= UNITS_PER_WORD);
+ return (mclass == MODE_INT) && (size <= UNITS_PER_WORD);
}
/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
extern void score7_override_options (void);
extern int score7_reg_class (int regno);
extern enum reg_class score7_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
- enum reg_class class);
+ enum reg_class rclass);
extern enum
-reg_class score7_secondary_reload_class (enum reg_class class,
+reg_class score7_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x);
extern int score7_const_ok_for_letter_p (HOST_WIDE_INT value, char c);
{
/* This is like change_address_1 (operands[0], mode, 0, 1) ,
except that we can't use that function because it is static. */
- rtx new = change_address (operands[0], mode, 0);
- MEM_COPY_ATTRIBUTES (new, operands[0]);
- operands[0] = new;
+ rtx new_rtx = change_address (operands[0], mode, 0);
+ MEM_COPY_ATTRIBUTES (new_rtx, operands[0]);
+ operands[0] = new_rtx;
}
/* This case can happen while generating code to move the result
const char *
output_far_jump (rtx insn, rtx op)
{
- struct { rtx lab, reg, op; } this;
+ struct { rtx lab, reg, op; } this_jmp;
rtx braf_base_lab = NULL_RTX;
const char *jump;
int far;
int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
rtx prev;
- this.lab = gen_label_rtx ();
+ this_jmp.lab = gen_label_rtx ();
if (TARGET_SH2
&& offset >= -32764
if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
&& INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
{
- this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
- if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
+ this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
+ if (REGNO (this_jmp.reg) == R0_REG && flag_pic && ! TARGET_SH2)
jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
- output_asm_insn (jump, &this.lab);
+ output_asm_insn (jump, &this_jmp.lab);
if (dbr_sequence_length ())
print_slot (final_sequence);
else
if (dbr_sequence_length ())
print_slot (final_sequence);
- this.reg = gen_rtx_REG (SImode, 13);
+ this_jmp.reg = gen_rtx_REG (SImode, 13);
/* We must keep the stack aligned to 8-byte boundaries on SH5.
Fortunately, MACL is fixed and call-clobbered, and we never
need its value across jumps, so save r13 in it instead of in
output_asm_insn ("lds r13, macl", 0);
else
output_asm_insn ("mov.l r13,@-r15", 0);
- output_asm_insn (jump, &this.lab);
+ output_asm_insn (jump, &this_jmp.lab);
if (TARGET_SH5)
output_asm_insn ("sts macl, r13", 0);
else
}
if (far)
output_asm_insn (".align 2", 0);
- (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
- this.op = op;
+ (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this_jmp.lab));
+ this_jmp.op = op;
if (far && flag_pic)
{
if (TARGET_SH2)
- this.lab = braf_base_lab;
- output_asm_insn (".long %O2-%O0", &this.lab);
+ this_jmp.lab = braf_base_lab;
+ output_asm_insn (".long %O2-%O0", &this_jmp.lab);
}
else
- output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);
+ output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this_jmp.lab);
return "";
}
}
}
-/* Output a code sequence for INSN using TEMPLATE with OPERANDS; but before,
+/* Output a code sequence for INSN using TEMPL with OPERANDS; but before,
fill in operands 9 as a label to the successor insn.
We try to use jump threading where possible.
IF CODE matches the comparison in the IF_THEN_ELSE of a following jump,
we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means
follow jmp and bt, if the address is in range. */
const char *
-output_branchy_insn (enum rtx_code code, const char *template,
+output_branchy_insn (enum rtx_code code, const char *templ,
rtx insn, rtx *operands)
{
rtx next_insn = NEXT_INSN (insn);
INSN_ADDRESSES_NEW (operands[9],
INSN_ADDRESSES (INSN_UID (next_insn))
+ get_attr_length (next_insn));
- return template;
+ return templ;
}
else
{
/* branch_true */
src = XEXP (src, 1);
operands[9] = src;
- return template;
+ return templ;
}
}
}
INSN_ADDRESSES_NEW (operands[9],
INSN_ADDRESSES (INSN_UID (insn))
+ get_attr_length (insn));
- return template;
+ return templ;
}
const char *
add_constant (rtx x, enum machine_mode mode, rtx last_value)
{
int i;
- rtx lab, new;
+ rtx lab, new_rtx;
label_ref_list_t ref, newref;
/* First see if we've already got it. */
}
if (rtx_equal_p (x, pool_vector[i].value))
{
- lab = new = 0;
+ lab = new_rtx = 0;
if (! last_value
|| ! i
|| ! rtx_equal_p (last_value, pool_vector[i-1].value))
{
- new = gen_label_rtx ();
- LABEL_REFS (new) = pool_vector[i].label;
- pool_vector[i].label = lab = new;
+ new_rtx = gen_label_rtx ();
+ LABEL_REFS (new_rtx) = pool_vector[i].label;
+ pool_vector[i].label = lab = new_rtx;
}
if (lab && pool_window_label)
{
newref->next = ref;
pool_vector[pool_window_last].wend = newref;
}
- if (new)
- pool_window_label = new;
+ if (new_rtx)
+ pool_window_label = new_rtx;
pool_window_last = i;
return lab;
}
rtx scan;
/* Don't look for the stack pointer as a scratch register,
it would cause trouble if an interrupt occurred. */
- unsigned try = 0x7fff, used;
+ unsigned attempt = 0x7fff, used;
int jump_left = flag_expensive_optimizations + 1;
/* It is likely that the most recent eligible instruction is wanted for
&& GET_CODE (PATTERN (scan)) != CLOBBER
&& get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
{
- try &= ~regs_used (PATTERN (scan), 0);
+ attempt &= ~regs_used (PATTERN (scan), 0);
break;
}
}
if (code == CALL_INSN)
used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
dead |= (used >> 16) & ~used;
- if (dead & try)
+ if (dead & attempt)
{
- dead &= try;
+ dead &= attempt;
break;
}
if (code == JUMP_INSN)
{
int sum = 0;
rtx body = PATTERN (insn);
- const char *template;
+ const char *templ;
char c;
int maybe_label = 1;
if (GET_CODE (body) == ASM_INPUT)
- template = XSTR (body, 0);
+ templ = XSTR (body, 0);
else if (asm_noperands (body) >= 0)
- template
+ templ
= decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
else
return 0;
int ppi_adjust = 0;
do
- c = *template++;
+ c = *templ++;
while (c == ' ' || c == '\t');
/* all sh-dsp parallel-processing insns start with p.
The only non-ppi sh insn starting with p is pref.
The only ppi starting with pr is prnd. */
- if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
+ if ((c == 'p' || c == 'P') && strncasecmp ("re", templ, 2))
ppi_adjust = 2;
/* The repeat pseudo-insn expands two three insns, a total of
six bytes in size. */
else if ((c == 'r' || c == 'R')
- && ! strncasecmp ("epeat", template, 5))
+ && ! strncasecmp ("epeat", templ, 5))
ppi_adjust = 4;
while (c && c != '\n'
- && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, template))
+ && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, templ))
{
/* If this is a label, it is obviously not a ppi insn. */
if (c == ':' && maybe_label)
}
else if (c == '\'' || c == '"')
maybe_label = 0;
- c = *template++;
+ c = *templ++;
}
sum += ppi_adjust;
maybe_label = c != ':';
is invalid. */
bool
sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
- enum reg_class class)
+ enum reg_class rclass)
{
/* We want to enable the use of SUBREGs as a means to
VEC_SELECT a single element of a vector. */
if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
- return (reg_classes_intersect_p (GENERAL_REGS, class));
+ return (reg_classes_intersect_p (GENERAL_REGS, rclass));
if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
{
if (TARGET_LITTLE_ENDIAN)
{
if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
- return reg_classes_intersect_p (DF_REGS, class);
+ return reg_classes_intersect_p (DF_REGS, rclass);
}
else
{
if (GET_MODE_SIZE (from) < 8)
- return reg_classes_intersect_p (DF_HI_REGS, class);
+ return reg_classes_intersect_p (DF_HI_REGS, rclass);
}
}
return 0;
{
CUMULATIVE_ARGS cum;
int structure_value_byref = 0;
- rtx this, this_value, sibcall, insns, funexp;
+ rtx this_rtx, this_value, sibcall, insns, funexp;
tree funtype = TREE_TYPE (function);
int simple_add = CONST_OK_FOR_ADD (delta);
int did_load = 0;
FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
}
- this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
+ this_rtx = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
/* For SHcompact, we only have r0 for a scratch register: r1 is the
static chain pointer (even if you can't have nested virtual functions
error ("Need a call-clobbered target register");
}
- this_value = plus_constant (this, delta);
+ this_value = plus_constant (this_rtx, delta);
if (vcall_offset
&& (simple_add || scratch0 != scratch1)
&& strict_memory_address_p (ptr_mode, this_value))
if (!delta)
; /* Do nothing. */
else if (simple_add)
- emit_move_insn (this, this_value);
+ emit_move_insn (this_rtx, this_value);
else
{
emit_move_insn (scratch1, GEN_INT (delta));
- emit_insn (gen_add2_insn (this, scratch1));
+ emit_insn (gen_add2_insn (this_rtx, scratch1));
}
if (vcall_offset)
rtx offset_addr;
if (!did_load)
- emit_load_ptr (scratch0, this);
+ emit_load_ptr (scratch0, this_rtx);
offset_addr = plus_constant (scratch0, vcall_offset);
if (strict_memory_address_p (ptr_mode, offset_addr))
/* scratch0 != scratch1, and we have indexed loads. Get better
schedule by loading the offset into r1 and using an indexed
load - then the load of r1 can issue before the load from
- (this + delta) finishes. */
+ (this_rtx + delta) finishes. */
emit_move_insn (scratch1, GEN_INT (vcall_offset));
offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
}
if (Pmode != ptr_mode)
scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
- emit_insn (gen_add2_insn (this, scratch0));
+ emit_insn (gen_add2_insn (this_rtx, scratch0));
}
/* Generate a tail call to the target function. */
}
sibcall = emit_call_insn (sibcall);
SIBLING_CALL_P (sibcall) = 1;
- use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
+ use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this_rtx);
emit_barrier ();
/* Run just enough of rest_of_compilation to do scheduling and get
if (GET_CODE (x) == SUBREG)
{
- rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements,
+ rtx new_rtx = replace_n_hard_rtx (SUBREG_REG (x), replacements,
n_replacements, modify);
- if (GET_CODE (new) == CONST_INT)
+ if (GET_CODE (new_rtx) == CONST_INT)
{
- x = simplify_subreg (GET_MODE (x), new,
+ x = simplify_subreg (GET_MODE (x), new_rtx,
GET_MODE (SUBREG_REG (x)),
SUBREG_BYTE (x));
if (! x)
abort ();
}
else if (modify)
- SUBREG_REG (x) = new;
+ SUBREG_REG (x) = new_rtx;
return x;
}
}
else if (GET_CODE (x) == ZERO_EXTEND)
{
- rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements,
+ rtx new_rtx = replace_n_hard_rtx (XEXP (x, 0), replacements,
n_replacements, modify);
- if (GET_CODE (new) == CONST_INT)
+ if (GET_CODE (new_rtx) == CONST_INT)
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
- new, GET_MODE (XEXP (x, 0)));
+ new_rtx, GET_MODE (XEXP (x, 0)));
if (! x)
abort ();
}
else if (modify)
- XEXP (x, 0) = new;
+ XEXP (x, 0) = new_rtx;
return x;
}
fmt = GET_RTX_FORMAT (GET_CODE (x));
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
{
- rtx new;
+ rtx new_rtx;
if (fmt[i] == 'e')
{
- new = replace_n_hard_rtx (XEXP (x, i), replacements,
+ new_rtx = replace_n_hard_rtx (XEXP (x, i), replacements,
n_replacements, modify);
- if (!new)
+ if (!new_rtx)
return NULL_RTX;
if (modify)
- XEXP (x, i) = new;
+ XEXP (x, i) = new_rtx;
}
else if (fmt[i] == 'E')
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
{
- new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
+ new_rtx = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
n_replacements, modify);
- if (!new)
+ if (!new_rtx)
return NULL_RTX;
if (modify)
- XVECEXP (x, i, j) = new;
+ XVECEXP (x, i, j) = new_rtx;
}
}
}
enum reg_class
-sh_secondary_reload (bool in_p, rtx x, enum reg_class class,
+sh_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
enum machine_mode mode, secondary_reload_info *sri)
{
if (in_p)
{
- if (REGCLASS_HAS_FP_REG (class)
+ if (REGCLASS_HAS_FP_REG (rclass)
&& ! TARGET_SHMEDIA
&& immediate_operand ((x), mode)
&& ! ((fp_zero_operand (x) || fp_one_operand (x))
default:
abort ();
}
- if (class == FPUL_REGS
+ if (rclass == FPUL_REGS
&& ((GET_CODE (x) == REG
&& (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
|| REGNO (x) == T_REG))
|| GET_CODE (x) == PLUS))
return GENERAL_REGS;
- if (class == FPUL_REGS && immediate_operand (x, mode))
+ if (rclass == FPUL_REGS && immediate_operand (x, mode))
{
if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
return GENERAL_REGS;
sri->icode = CODE_FOR_reload_insi__i_fpul;
return NO_REGS;
}
- if (class == FPSCR_REGS
+ if (rclass == FPSCR_REGS
&& ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
|| (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
return GENERAL_REGS;
- if (REGCLASS_HAS_FP_REG (class)
+ if (REGCLASS_HAS_FP_REG (rclass)
&& TARGET_SHMEDIA
&& immediate_operand (x, mode)
&& x != CONST0_RTX (GET_MODE (x))
? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
return NO_REGS;
}
- if (TARGET_SHMEDIA && class == GENERAL_REGS
+ if (TARGET_SHMEDIA && rclass == GENERAL_REGS
&& (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x)))
return TARGET_REGS;
} /* end of input-only processing. */
- if (((REGCLASS_HAS_FP_REG (class)
+ if (((REGCLASS_HAS_FP_REG (rclass)
&& (GET_CODE (x) == REG
&& (GENERAL_OR_AP_REGISTER_P (REGNO (x))
|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
&& TARGET_FMOVD))))
- || (REGCLASS_HAS_GENERAL_REG (class)
+ || (REGCLASS_HAS_GENERAL_REG (rclass)
&& GET_CODE (x) == REG
&& FP_REGISTER_P (REGNO (x))))
&& ! TARGET_SHMEDIA
&& (mode == SFmode || mode == SImode))
return FPUL_REGS;
- if ((class == FPUL_REGS
- || (REGCLASS_HAS_FP_REG (class)
+ if ((rclass == FPUL_REGS
+ || (REGCLASS_HAS_FP_REG (rclass)
&& ! TARGET_SHMEDIA && mode == SImode))
&& (GET_CODE (x) == MEM
|| (GET_CODE (x) == REG
|| REGNO (x) == T_REG
|| system_reg_operand (x, VOIDmode)))))
{
- if (class == FPUL_REGS)
+ if (rclass == FPUL_REGS)
return GENERAL_REGS;
return FPUL_REGS;
}
- if ((class == TARGET_REGS
- || (TARGET_SHMEDIA && class == SIBCALL_REGS))
+ if ((rclass == TARGET_REGS
+ || (TARGET_SHMEDIA && rclass == SIBCALL_REGS))
&& !satisfies_constraint_Csy (x)
&& (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
return GENERAL_REGS;
- if ((class == MAC_REGS || class == PR_REGS)
+ if ((rclass == MAC_REGS || rclass == PR_REGS)
&& GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
- && class != REGNO_REG_CLASS (REGNO (x)))
+ && rclass != REGNO_REG_CLASS (REGNO (x)))
return GENERAL_REGS;
- if (class != GENERAL_REGS && GET_CODE (x) == REG
+ if (rclass != GENERAL_REGS && GET_CODE (x) == REG
&& TARGET_REGISTER_P (REGNO (x)))
return GENERAL_REGS;
return NO_REGS;
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this, insn, funexp;
+ rtx this_rtx, insn, funexp;
unsigned int int_arg_first;
reload_completed = 1;
/* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
returns a structure, the structure return pointer is there instead. */
if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, int_arg_first + 1);
+ this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
else
- this = gen_rtx_REG (Pmode, int_arg_first);
+ this_rtx = gen_rtx_REG (Pmode, int_arg_first);
/* Add DELTA. When possible use a plain add, otherwise load it into
a register first. */
delta_rtx = scratch;
}
- /* THIS += DELTA. */
- emit_insn (gen_add2_insn (this, delta_rtx));
+ /* THIS_RTX += DELTA. */
+ emit_insn (gen_add2_insn (this_rtx, delta_rtx));
}
- /* Add the word at address (*THIS + VCALL_OFFSET). */
+ /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
if (vcall_offset)
{
rtx vcall_offset_rtx = GEN_INT (vcall_offset);
gcc_assert (vcall_offset < 0);
- /* SCRATCH = *THIS. */
- emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
+ /* SCRATCH = *THIS_RTX. */
+ emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
/* Prepare for adding VCALL_OFFSET. The difficulty is that we
may not have any available scratch register at this point. */
vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
}
- /* SCRATCH = *(*THIS + VCALL_OFFSET). */
+ /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
emit_move_insn (scratch, gen_rtx_MEM (Pmode,
gen_rtx_PLUS (Pmode,
scratch,
vcall_offset_rtx)));
- /* THIS += *(*THIS + VCALL_OFFSET). */
- emit_insn (gen_add2_insn (this, scratch));
+ /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
+ emit_insn (gen_add2_insn (this_rtx, scratch));
}
/* Generate a tail call to the target function. */
: get_attr_length (insn) == 4);
int really_reversed = reversed ^ need_longbranch;
const char *ccode;
- const char *template;
+ const char *templ;
const char *operands;
enum rtx_code code;
}
if (need_longbranch)
- template = "b%s %s,.+8 | jmpf %s";
+ templ = "b%s %s,.+8 | jmpf %s";
else
- template = "b%s %s,%s";
- sprintf (string, template, ccode, operands, label);
+ templ = "b%s %s,%s";
+ sprintf (string, templ, ccode, operands, label);
return string;
}
int need_longbranch = get_attr_length (insn) >= 8;
int really_reversed = reversed ^ need_longbranch;
const char *ccode;
- const char *template;
+ const char *templ;
char prevop[16];
enum rtx_code code;
}
if (need_longbranch)
- template = "%s | b%s .+6 | jmpf %s";
+ templ = "%s | b%s .+6 | jmpf %s";
else
- template = "%s | b%s %s";
- sprintf (string, template, prevop, ccode, label);
+ templ = "%s | b%s %s";
+ sprintf (string, templ, prevop, ccode, label);
return string;
}
You should define these macros to indicate to the reload phase that it may
need to allocate at least one register for a reload in addition to the
register to contain the data. Specifically, if copying X to a register
- CLASS in MODE requires an intermediate register, you should define
+ RCLASS in MODE requires an intermediate register, you should define
`SECONDARY_INPUT_RELOAD_CLASS' to return the largest register class all of
whose registers can be used as intermediate registers or scratch registers.
- If copying a register CLASS in MODE to X requires an intermediate or scratch
+ If copying a register RCLASS in MODE to X requires an intermediate or scratch
register, `SECONDARY_OUTPUT_RELOAD_CLASS' should be defined to return the
largest register class required. If the requirements for input and output
reloads are the same, the macro `SECONDARY_RELOAD_CLASS' should be used
The values returned by these macros are often `GENERAL_REGS'. Return
`NO_REGS' if no spare register is needed; i.e., if X can be directly copied
- to or from a register of CLASS in MODE without requiring a scratch register.
+ to or from a register of RCLASS in MODE without requiring a scratch register.
Do not define this macro if it would always return `NO_REGS'.
If a scratch register is required (either with or without an intermediate
Define constraints for the reload register and scratch register that contain
a single register class. If the original reload register (whose class is
- CLASS) can meet the constraint given in the pattern, the value returned by
+ RCLASS) can meet the constraint given in the pattern, the value returned by
these macros is used for the class of the scratch register. Otherwise, two
additional reload registers are required. Their classes are obtained from
the constraints in the insn pattern.
This case often occurs between floating-point and general registers. */
enum reg_class
-xstormy16_secondary_reload_class (enum reg_class class,
+xstormy16_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode,
rtx x)
{
|| ((GET_CODE (x) == SUBREG || GET_CODE (x) == REG)
&& (true_regnum (x) == -1
|| true_regnum (x) >= FIRST_PSEUDO_REGISTER)))
- && ! reg_class_subset_p (class, EIGHT_REGS))
+ && ! reg_class_subset_p (rclass, EIGHT_REGS))
return EIGHT_REGS;
/* When reloading a PLUS, the carry register will be required
}
enum reg_class
-xstormy16_preferred_reload_class (rtx x, enum reg_class class)
+xstormy16_preferred_reload_class (rtx x, enum reg_class rclass)
{
- if (class == GENERAL_REGS
+ if (rclass == GENERAL_REGS
&& GET_CODE (x) == MEM)
return EIGHT_REGS;
- return class;
+ return rclass;
}
/* Predicate for symbols and addresses that reflect special 8-bit
/* Expand an atomic compare and swap operation for HImode and QImode.
MEM is the memory location, CMP the old value to compare MEM with
- and NEW the value to set if CMP == MEM. */
+ and NEW_RTX the value to set if CMP == MEM. */
void
-xtensa_expand_compare_and_swap (rtx target, rtx mem, rtx cmp, rtx new)
+xtensa_expand_compare_and_swap (rtx target, rtx mem, rtx cmp, rtx new_rtx)
{
enum machine_mode mode = GET_MODE (mem);
struct alignment_context ac;
if (ac.shift != NULL_RTX)
{
cmp = xtensa_expand_mask_and_shift (cmp, mode, ac.shift);
- new = xtensa_expand_mask_and_shift (new, mode, ac.shift);
+ new_rtx = xtensa_expand_mask_and_shift (new_rtx, mode, ac.shift);
}
/* Load the surrounding word into VAL with the MEM value masked out. */
OPTAB_DIRECT));
emit_label (csloop);
- /* Patch CMP and NEW into VAL at correct position. */
+ /* Patch CMP and NEW_RTX into VAL at correct position. */
cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
NULL_RTX, 1, OPTAB_DIRECT));
- newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new, val,
+ newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
NULL_RTX, 1, OPTAB_DIRECT));
/* Jump to end if we're done. */
rtx csloop = gen_label_rtx ();
rtx cmp, tmp;
rtx old = gen_reg_rtx (SImode);
- rtx new = gen_reg_rtx (SImode);
+ rtx new_rtx = gen_reg_rtx (SImode);
rtx orig = NULL_RTX;
init_alignment_context (&ac, mem);
tmp = expand_simple_binop (SImode, AND, old, ac.modemaski,
NULL_RTX, 1, OPTAB_DIRECT);
tmp = expand_simple_binop (SImode, IOR, tmp, val,
- new, 1, OPTAB_DIRECT);
+ new_rtx, 1, OPTAB_DIRECT);
break;
case AND:
case IOR:
case XOR:
tmp = expand_simple_binop (SImode, code, old, val,
- new, 1, OPTAB_DIRECT);
+ new_rtx, 1, OPTAB_DIRECT);
break;
case MULT: /* NAND */
tmp = expand_simple_binop (SImode, XOR, old, ac.modemask,
NULL_RTX, 1, OPTAB_DIRECT);
tmp = expand_simple_binop (SImode, AND, tmp, val,
- new, 1, OPTAB_DIRECT);
+ new_rtx, 1, OPTAB_DIRECT);
break;
default:
gcc_unreachable ();
}
- if (tmp != new)
- emit_move_insn (new, tmp);
- emit_insn (gen_sync_compare_and_swapsi (cmp, ac.memsi, old, new));
+ if (tmp != new_rtx)
+ emit_move_insn (new_rtx, tmp);
+ emit_insn (gen_sync_compare_and_swapsi (cmp, ac.memsi, old, new_rtx));
emit_cmp_and_jump_insns (cmp, old, NE, const0_rtx, SImode, true, csloop);
if (target)
{
- tmp = (after ? new : cmp);
+ tmp = (after ? new_rtx : cmp);
convert_move (target,
(ac.shift == NULL_RTX ? tmp
: expand_simple_binop (SImode, LSHIFTRT, tmp, ac.shift,
mode = (enum machine_mode) ((int) mode + 1))
{
int size = GET_MODE_SIZE (mode);
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum mode_class mclass = GET_MODE_CLASS (mode);
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
{
if (ACC_REG_P (regno))
temp = (TARGET_MAC16
- && (class == MODE_INT) && (size <= UNITS_PER_WORD));
+ && (mclass == MODE_INT) && (size <= UNITS_PER_WORD));
else if (GP_REG_P (regno))
temp = ((regno & 1) == 0 || (size <= UNITS_PER_WORD));
else if (FP_REG_P (regno))
enum reg_class
-xtensa_preferred_reload_class (rtx x, enum reg_class class, int isoutput)
+xtensa_preferred_reload_class (rtx x, enum reg_class rclass, int isoutput)
{
if (!isoutput && CONSTANT_P (x) && GET_CODE (x) == CONST_DOUBLE)
return NO_REGS;
won't know that it is live because the hard frame pointer is
treated specially. */
- if (class == AR_REGS || class == GR_REGS)
+ if (rclass == AR_REGS || rclass == GR_REGS)
return RL_REGS;
- return class;
+ return rclass;
}
enum reg_class
-xtensa_secondary_reload_class (enum reg_class class,
+xtensa_secondary_reload_class (enum reg_class rclass,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx x, int isoutput)
{
if (!isoutput)
{
- if ((class == FP_REGS || GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ if ((rclass == FP_REGS || GET_MODE_SIZE (mode) < UNITS_PER_WORD)
&& constantpool_mem_p (x))
return RL_REGS;
}
if (ACC_REG_P (regno))
- return ((class == GR_REGS || class == RL_REGS) ? NO_REGS : RL_REGS);
- if (class == ACC_REG)
+ return ((rclass == GR_REGS || rclass == RL_REGS) ? NO_REGS : RL_REGS);
+ if (rclass == ACC_REG)
return (GP_REG_P (regno) ? NO_REGS : RL_REGS);
return NO_REGS;
{
if (! annul_p)
{
- rtx new;
+ rtx new_rtx;
update_block (dtrial, thread);
- new = delete_from_delay_slot (dtrial);
+ new_rtx = delete_from_delay_slot (dtrial);
if (INSN_DELETED_P (thread))
- thread = new;
+ thread = new_rtx;
INSN_FROM_TARGET_P (next_to_match) = 0;
}
else
{
if (GET_MODE (merged_insns) == SImode)
{
- rtx new;
+ rtx new_rtx;
update_block (XEXP (merged_insns, 0), thread);
- new = delete_from_delay_slot (XEXP (merged_insns, 0));
+ new_rtx = delete_from_delay_slot (XEXP (merged_insns, 0));
if (INSN_DELETED_P (thread))
- thread = new;
+ thread = new_rtx;
}
else
{
The second instance of `foo' nullifies the dllimport. */
tree
-merge_dllimport_decl_attributes (tree old, tree new)
+merge_dllimport_decl_attributes (tree old, tree new_tree)
{
tree a;
int delete_dllimport_p = 1;
is not dllimport'd. We also remove a `new' dllimport if the old list
contains dllexport: dllexport always overrides dllimport, regardless
of the order of declaration. */
- if (!VAR_OR_FUNCTION_DECL_P (new))
+ if (!VAR_OR_FUNCTION_DECL_P (new_tree))
delete_dllimport_p = 0;
- else if (DECL_DLLIMPORT_P (new)
+ else if (DECL_DLLIMPORT_P (new_tree)
&& lookup_attribute ("dllexport", DECL_ATTRIBUTES (old)))
{
- DECL_DLLIMPORT_P (new) = 0;
+ DECL_DLLIMPORT_P (new_tree) = 0;
warning (OPT_Wattributes, "%q+D already declared with dllexport attribute: "
- "dllimport ignored", new);
+ "dllimport ignored", new_tree);
}
- else if (DECL_DLLIMPORT_P (old) && !DECL_DLLIMPORT_P (new))
+ else if (DECL_DLLIMPORT_P (old) && !DECL_DLLIMPORT_P (new_tree))
{
/* Warn about overriding a symbol that has already been used, e.g.:
extern int __attribute__ ((dllimport)) foo;
if (TREE_USED (old))
{
warning (0, "%q+D redeclared without dllimport attribute "
- "after being referenced with dll linkage", new);
+ "after being referenced with dll linkage", new_tree);
/* If we have used a variable's address with dllimport linkage,
keep the old DECL_DLLIMPORT_P flag: the ADDR_EXPR using the
decl may already have had TREE_CONSTANT computed.
We still remove the attribute so that assembler code refers
to '&foo rather than '_imp__foo'. */
if (TREE_CODE (old) == VAR_DECL && TREE_ADDRESSABLE (old))
- DECL_DLLIMPORT_P (new) = 1;
+ DECL_DLLIMPORT_P (new_tree) = 1;
}
/* Let an inline definition silently override the external reference,
but otherwise warn about attribute inconsistency. */
- else if (TREE_CODE (new) == VAR_DECL
- || !DECL_DECLARED_INLINE_P (new))
+ else if (TREE_CODE (new_tree) == VAR_DECL
+ || !DECL_DECLARED_INLINE_P (new_tree))
warning (OPT_Wattributes, "%q+D redeclared without dllimport attribute: "
- "previous dllimport ignored", new);
+ "previous dllimport ignored", new_tree);
}
else
delete_dllimport_p = 0;
- a = merge_attributes (DECL_ATTRIBUTES (old), DECL_ATTRIBUTES (new));
+ a = merge_attributes (DECL_ATTRIBUTES (old), DECL_ATTRIBUTES (new_tree));
if (delete_dllimport_p)
{