/* Integrated Register Allocator (IRA) entry point.
- Copyright (C) 2006-2019 Free Software Foundation, Inc.
+ Copyright (C) 2006-2020 Free Software Foundation, Inc.
Contributed by Vladimir Makarov <vmakarov@redhat.com>.
This file is part of GCC.
#include "print-rtl.h"
struct target_ira default_target_ira;
-struct target_ira_int default_target_ira_int;
+class target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
struct target_ira *this_target_ira = &default_target_ira;
-struct target_ira_int *this_target_ira_int = &default_target_ira_int;
+class target_ira_int *this_target_ira_int = &default_target_ira_int;
#endif
/* A modified value of flag `-fira-verbose' used internally. */
/* The following array contains info about spilled pseudo-registers
stack slots used in current function so far. */
-struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
+class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
/* Correspondingly overall cost of the allocation, overall cost before
reload, cost of the allocnos assigned to hard-registers, cost of
ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
CLEAR_HARD_REG_SET (processed_hard_reg_set);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
#ifdef ADJUST_REG_ALLOC_ORDER
ADJUST_REG_ALLOC_ORDER;
#endif
- COPY_HARD_REG_SET (no_unit_alloc_regs, fixed_nonglobal_reg_set);
+ no_unit_alloc_regs = fixed_nonglobal_reg_set;
if (! use_hard_frame_p)
- SET_HARD_REG_BIT (no_unit_alloc_regs, HARD_FRAME_POINTER_REGNUM);
+ add_to_hard_reg_set (&no_unit_alloc_regs, Pmode,
+ HARD_FRAME_POINTER_REGNUM);
setup_class_hard_regs ();
}
if (i == (int) NO_REGS)
continue;
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs;
if (hard_reg_set_empty_p (temp_hard_regset))
continue;
for (j = 0; j < N_REG_CLASSES; j++)
{
enum reg_class *p;
- COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[j]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
+ temp_hard_regset2 = reg_class_contents[j] & ~no_unit_alloc_regs;
if (! hard_reg_set_subset_p (temp_hard_regset,
temp_hard_regset2))
continue;
for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
+ temp_hard_regset2 = reg_class_contents[cl2] & ~no_unit_alloc_regs;
ira_class_subset_p[cl][cl2]
= hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
if (! hard_reg_set_empty_p (temp_hard_regset2)
for (i = 0; i < ira_pressure_classes_num; i++)
{
cl = ira_pressure_classes[i];
- COPY_HARD_REG_SET (temp_hard_regset2, temp_hard_regset);
- AND_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
+ temp_hard_regset2 = temp_hard_regset & reg_class_contents[cl];
size = hard_reg_set_size (temp_hard_regset2);
if (best < size)
{
register pressure class. */
for (m = 0; m < NUM_MACHINE_MODES; m++)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- AND_COMPL_HARD_REG_SET (temp_hard_regset,
- ira_prohibited_class_mode_regs[cl][m]);
+ temp_hard_regset
+ = (reg_class_contents[cl]
+ & ~(no_unit_alloc_regs
+ | ira_prohibited_class_mode_regs[cl][m]));
if (hard_reg_set_empty_p (temp_hard_regset))
continue;
ira_init_register_move_cost_if_necessary ((machine_mode) m);
}
curr = 0;
insert_p = true;
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
/* Remove so far added pressure classes which are subset of the
current candidate class. Prefer GENERAL_REGS as a pressure
register class to another class containing the same
for (i = 0; i < n; i++)
{
cl2 = pressure_classes[i];
- COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
+ temp_hard_regset2 = (reg_class_contents[cl2]
+ & ~no_unit_alloc_regs);
if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
- && (! hard_reg_set_equal_p (temp_hard_regset,
- temp_hard_regset2)
+ && (temp_hard_regset != temp_hard_regset2
|| cl2 == (int) GENERAL_REGS))
{
pressure_classes[curr++] = (enum reg_class) cl2;
continue;
}
if (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset)
- && (! hard_reg_set_equal_p (temp_hard_regset2,
- temp_hard_regset)
+ && (temp_hard_regset2 != temp_hard_regset
|| cl == (int) GENERAL_REGS))
continue;
- if (hard_reg_set_equal_p (temp_hard_regset2, temp_hard_regset))
+ if (temp_hard_regset2 == temp_hard_regset)
insert_p = false;
pressure_classes[curr++] = (enum reg_class) cl2;
}
registers available for the allocation. */
CLEAR_HARD_REG_SET (temp_hard_regset);
CLEAR_HARD_REG_SET (temp_hard_regset2);
- COPY_HARD_REG_SET (ignore_hard_regs, no_unit_alloc_regs);
+ ignore_hard_regs = no_unit_alloc_regs;
for (cl = 0; cl < LIM_REG_CLASSES; cl++)
{
/* For some targets (like MIPS with MD_REGS), there are some
break;
if (m >= NUM_MACHINE_MODES)
{
- IOR_HARD_REG_SET (ignore_hard_regs, reg_class_contents[cl]);
+ ignore_hard_regs |= reg_class_contents[cl];
continue;
}
for (i = 0; i < n; i++)
if ((int) pressure_classes[i] == cl)
break;
- IOR_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
+ temp_hard_regset2 |= reg_class_contents[cl];
if (i < n)
- IOR_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
+ temp_hard_regset |= reg_class_contents[cl];
}
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
/* Some targets (like SPARC with ICC reg) have allocatable regs
for which no reg class is defined. */
if (REGNO_REG_CLASS (i) == NO_REGS)
SET_HARD_REG_BIT (ignore_hard_regs, i);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, ignore_hard_regs);
- AND_COMPL_HARD_REG_SET (temp_hard_regset2, ignore_hard_regs);
+ temp_hard_regset &= ~ignore_hard_regs;
+ temp_hard_regset2 &= ~ignore_hard_regs;
ira_assert (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset));
}
#endif
same set of hard registers. */
for (i = 0; i < LIM_REG_CLASSES; i++)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs;
for (j = 0; j < n; j++)
{
cl = classes[j];
- COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset2,
- no_unit_alloc_regs);
- if (hard_reg_set_equal_p (temp_hard_regset,
- temp_hard_regset2))
+ temp_hard_regset2 = reg_class_contents[cl] & ~no_unit_alloc_regs;
+ if (temp_hard_regset == temp_hard_regset2)
break;
}
if (j >= n || targetm.additional_allocno_class_p (i))
for (cl = 0; cl < N_REG_CLASSES; cl++)
if (ira_class_hard_regs_num[cl] > 0)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
set_p = false;
for (j = 0; j < ira_allocno_classes_num; j++)
{
- COPY_HARD_REG_SET (temp_hard_regset2,
- reg_class_contents[ira_allocno_classes[j]]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
+ temp_hard_regset2 = (reg_class_contents[ira_allocno_classes[j]]
+ & ~no_unit_alloc_regs);
if ((enum reg_class) cl == ira_allocno_classes[j])
break;
else if (hard_reg_set_subset_p (temp_hard_regset,
for (i = 0; i < classes_num; i++)
{
aclass = classes[i];
- COPY_HARD_REG_SET (temp_hard_regset,
- reg_class_contents[aclass]);
- AND_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
+ temp_hard_regset = (reg_class_contents[aclass]
+ & reg_class_contents[cl]
+ & ~no_unit_alloc_regs);
if (! hard_reg_set_empty_p (temp_hard_regset))
{
min_cost = INT_MAX;
ira_reg_classes_intersect_p[cl1][cl2] = false;
ira_reg_class_intersect[cl1][cl2] = NO_REGS;
ira_reg_class_subset[cl1][cl2] = NO_REGS;
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- COPY_HARD_REG_SET (temp_set2, reg_class_contents[cl2]);
- AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[cl1] & ~no_unit_alloc_regs;
+ temp_set2 = reg_class_contents[cl2] & ~no_unit_alloc_regs;
if (hard_reg_set_empty_p (temp_hard_regset)
&& hard_reg_set_empty_p (temp_set2))
{
}
ira_reg_class_subunion[cl1][cl2] = NO_REGS;
ira_reg_class_superunion[cl1][cl2] = NO_REGS;
- COPY_HARD_REG_SET (intersection_set, reg_class_contents[cl1]);
- AND_HARD_REG_SET (intersection_set, reg_class_contents[cl2]);
- AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
- COPY_HARD_REG_SET (union_set, reg_class_contents[cl1]);
- IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
- AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
+ intersection_set = (reg_class_contents[cl1]
+ & reg_class_contents[cl2]
+ & ~no_unit_alloc_regs);
+ union_set = ((reg_class_contents[cl1] | reg_class_contents[cl2])
+ & ~no_unit_alloc_regs);
for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl3]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[cl3] & ~no_unit_alloc_regs;
if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
{
/* CL3 allocatable hard register set is inside of
of CL1 and CL2. */
if (important_class_p[cl3])
{
- COPY_HARD_REG_SET
- (temp_set2,
- reg_class_contents
- [(int) ira_reg_class_intersect[cl1][cl2]]);
- AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
+ temp_set2
+ = (reg_class_contents
+ [ira_reg_class_intersect[cl1][cl2]]);
+ temp_set2 &= ~no_unit_alloc_regs;
if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
/* If the allocatable hard register sets are
the same, prefer GENERAL_REGS or the
smallest class for debugging
purposes. */
- || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
+ || (temp_hard_regset == temp_set2
&& (cl3 == GENERAL_REGS
|| ((ira_reg_class_intersect[cl1][cl2]
!= GENERAL_REGS)
ira_reg_class_intersect[cl1][cl2]])))))
ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
}
- COPY_HARD_REG_SET
- (temp_set2,
- reg_class_contents[(int) ira_reg_class_subset[cl1][cl2]]);
- AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
+ temp_set2
+ = (reg_class_contents[ira_reg_class_subset[cl1][cl2]]
+ & ~no_unit_alloc_regs);
if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
/* Ignore unavailable hard registers and prefer
smallest class for debugging purposes. */
- || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
+ || (temp_hard_regset == temp_set2
&& hard_reg_set_subset_p
(reg_class_contents[cl3],
reg_class_contents
/* CL3 allocatable hard register set is inside of
union of allocatable hard register sets of CL1
and CL2. */
- COPY_HARD_REG_SET
- (temp_set2,
- reg_class_contents[(int) ira_reg_class_subunion[cl1][cl2]]);
- AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
+ temp_set2
+ = (reg_class_contents[ira_reg_class_subunion[cl1][cl2]]
+ & ~no_unit_alloc_regs);
if (ira_reg_class_subunion[cl1][cl2] == NO_REGS
|| (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
- && (! hard_reg_set_equal_p (temp_set2,
- temp_hard_regset)
+ && (temp_set2 != temp_hard_regset
|| cl3 == GENERAL_REGS
/* If the allocatable hard register sets are the
same, prefer GENERAL_REGS or the smallest
/* CL3 allocatable hard register set contains union
of allocatable hard register sets of CL1 and
CL2. */
- COPY_HARD_REG_SET
- (temp_set2,
- reg_class_contents[(int) ira_reg_class_superunion[cl1][cl2]]);
- AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
+ temp_set2
+ = (reg_class_contents[ira_reg_class_superunion[cl1][cl2]]
+ & ~no_unit_alloc_regs);
if (ira_reg_class_superunion[cl1][cl2] == NO_REGS
|| (hard_reg_set_subset_p (temp_hard_regset, temp_set2)
- && (! hard_reg_set_equal_p (temp_set2,
- temp_hard_regset)
+ && (temp_set2 != temp_hard_regset
|| cl3 == GENERAL_REGS
/* If the allocatable hard register sets are the
same, prefer GENERAL_REGS or the smallest
for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
- AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
+ temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
for (j = 0; j < NUM_MACHINE_MODES; j++)
{
count = 0;
{
int i;
static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
+ int fp_reg_count = hard_regno_nregs (HARD_FRAME_POINTER_REGNUM, Pmode);
/* Setup is_leaf as frame_pointer_required may use it. This function
is called by sched_init before ira if scheduling is enabled. */
frame pointer in LRA. */
if (frame_pointer_needed)
- df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
+ for (i = 0; i < fp_reg_count; i++)
+ df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true);
- COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
+ ira_no_alloc_regs = no_unit_alloc_regs;
CLEAR_HARD_REG_SET (eliminable_regset);
compute_regs_asm_clobbered ();
}
if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
{
- if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
- {
- SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
- if (frame_pointer_needed)
- SET_HARD_REG_BIT (ira_no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
- }
- else if (frame_pointer_needed)
- error ("%s cannot be used in %<asm%> here",
- reg_names[HARD_FRAME_POINTER_REGNUM]);
- else
- df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
+ for (i = 0; i < fp_reg_count; i++)
+ if (!TEST_HARD_REG_BIT (crtl->asm_clobbers,
+ HARD_FRAME_POINTER_REGNUM + i))
+ {
+ SET_HARD_REG_BIT (eliminable_regset,
+ HARD_FRAME_POINTER_REGNUM + i);
+ if (frame_pointer_needed)
+ SET_HARD_REG_BIT (ira_no_alloc_regs,
+ HARD_FRAME_POINTER_REGNUM + i);
+ }
+ else if (frame_pointer_needed)
+ error ("%s cannot be used in %<asm%> here",
+ reg_names[HARD_FRAME_POINTER_REGNUM + i]);
+ else
+ df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true);
}
}
for (i = 0; i < nwords; i++)
{
obj = ALLOCNO_OBJECT (a, i);
- IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- reg_class_contents[pclass]);
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
+ |= ~reg_class_contents[pclass];
}
- if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
- && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
- call_used_reg_set))
+ if (ira_need_caller_save_p (a, hard_regno))
{
ira_assert (!optimize || flag_caller_saves
|| (ALLOCNO_CALLS_CROSSED_NUM (a)
return valid_none;
}
- note_stores (PATTERN (insn), validate_equiv_mem_from_store, &info);
+ note_stores (insn, validate_equiv_mem_from_store, &info);
if (info.equiv_mem_modified)
return valid_none;
case CC0:
case CLOBBER:
- case CLOBBER_HIGH:
return 0;
case PRE_INC:
return memref_referenced_p (memref, SET_SRC (x), true);
case CLOBBER:
- case CLOBBER_HIGH:
if (process_set_for_memref_referenced_p (memref, XEXP (x, 0)))
return true;
return true;
}
+/* Scan the instructions before update_equiv_regs. Record which registers
+ are referenced as paradoxical subregs. Also check for cases in which
+ the current function needs to save a register that one of its call
+ instructions clobbers.
+
+ These things are logically unrelated, but it's more efficient to do
+ them together. */
+
+static void
+update_equiv_regs_prescan (void)
+{
+ basic_block bb;
+ rtx_insn *insn;
+ function_abi_aggregator callee_abis;
+
+ /* Walk every nondebug insn once: flag registers used in paradoxical
+ subregs, and for each call insn accumulate the callee's ABI. */
+ FOR_EACH_BB_FN (bb, cfun)
+ FOR_BB_INSNS (bb, insn)
+ if (NONDEBUG_INSN_P (insn))
+ {
+ set_paradoxical_subreg (insn);
+ if (CALL_P (insn))
+ callee_abis.note_callee_abi (insn_callee_abi (insn));
+ }
+
+ /* Registers that this function must save because some callee clobbers
+ them (relative to crtl->abi); record each one as ever-live in DF so
+ later passes know the function touches it. */
+ HARD_REG_SET extra_caller_saves = callee_abis.caller_save_regs (*crtl->abi);
+ if (!hard_reg_set_empty_p (extra_caller_saves))
+ for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
+ if (TEST_HARD_REG_BIT (extra_caller_saves, regno))
+ df_set_regs_ever_live (regno, true);
+}
+
/* Find registers that are equivalent to a single value throughout the
compilation (either because they can be referenced in memory or are
set once from a single constant). Lower their priority for a
rtx_insn *insn;
basic_block bb;
- /* Scan insns and set pdx_subregs if the reg is used in a
- paradoxical subreg. Don't set such reg equivalent to a mem,
- because lra will not substitute such equiv memory in order to
- prevent access beyond allocated memory for paradoxical memory subreg. */
- FOR_EACH_BB_FN (bb, cfun)
- FOR_BB_INSNS (bb, insn)
- if (NONDEBUG_INSN_P (insn))
- set_paradoxical_subreg (insn);
-
/* Scan the insns and find which registers have equivalences. Do this
in a separate scan of the insns because (due to -fcse-follow-jumps)
a register can be set below its use. */
if (set == NULL_RTX
|| side_effects_p (SET_SRC (set)))
{
- note_stores (PATTERN (insn), no_equiv, NULL);
+ note_pattern_stores (PATTERN (insn), no_equiv, NULL);
continue;
}
else if (GET_CODE (PATTERN (insn)) == PARALLEL)
{
rtx part = XVECEXP (PATTERN (insn), 0, i);
if (part != set)
- note_stores (part, no_equiv, NULL);
+ note_pattern_stores (part, no_equiv, NULL);
}
}
{
/* This might be setting a SUBREG of a pseudo, a pseudo that is
also set somewhere else to a constant. */
- note_stores (set, no_equiv, NULL);
+ note_pattern_stores (set, no_equiv, NULL);
continue;
}
equivalent to a mem. */
if (MEM_P (src) && reg_equiv[regno].pdx_subregs)
{
- note_stores (set, no_equiv, NULL);
+ note_pattern_stores (set, no_equiv, NULL);
continue;
}
if (can_throw_internal (def_insn))
continue;
+ /* Instructions with multiple sets can only be moved if DF analysis is
+ performed for all of the registers set. See PR91052. */
+ if (multiple_sets (def_insn))
+ continue;
+
basic_block use_bb = BLOCK_FOR_INSN (use_insn);
basic_block def_bb = BLOCK_FOR_INSN (def_insn);
if (bb_loop_depth (use_bb) > bb_loop_depth (def_bb))
/* Print chain C to FILE. */
static void
-print_insn_chain (FILE *file, struct insn_chain *c)
+print_insn_chain (FILE *file, class insn_chain *c)
{
fprintf (file, "insn=%d, ", INSN_UID (c->insn));
bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
static void
print_insn_chains (FILE *file)
{
- struct insn_chain *c;
+ class insn_chain *c;
for (c = reload_insn_chain; c ; c = c->next)
print_insn_chain (file, c);
}
build_insn_chain (void)
{
unsigned int i;
- struct insn_chain **p = &reload_insn_chain;
+ class insn_chain **p = &reload_insn_chain;
basic_block bb;
- struct insn_chain *c = NULL;
- struct insn_chain *next = NULL;
+ class insn_chain *c = NULL;
+ class insn_chain *next = NULL;
auto_bitmap live_relevant_regs;
auto_bitmap elim_regset;
/* live_subregs is a vector used to keep accurate information about
&& rtx_moveable_p (&XEXP (x, 2), OP_IN));
case CLOBBER:
- case CLOBBER_HIGH:
return rtx_moveable_p (&SET_DEST (x), OP_OUT);
case UNSPEC_VOLATILE:
for (int i = 0; i < XVECLEN (pat, 0); i++)
{
rtx sub = XVECEXP (pat, 0, i);
- if (GET_CODE (sub) == USE
- || GET_CODE (sub) == CLOBBER
- || GET_CODE (sub) == CLOBBER_HIGH)
+ if (GET_CODE (sub) == USE || GET_CODE (sub) == CLOBBER)
continue;
if (GET_CODE (sub) != SET
|| side_effects_p (sub))
int ira_max_point_before_emit;
bool saved_flag_caller_saves = flag_caller_saves;
enum ira_region saved_flag_ira_region = flag_ira_region;
- unsigned int i;
- int num_used_regs = 0;
clear_bb_flags ();
ira_conflicts_p = optimize > 0;
/* Determine the number of pseudos actually requiring coloring. */
- for (i = FIRST_PSEUDO_REGISTER; i < DF_REG_SIZE (df); i++)
- num_used_regs += !!(DF_REG_USE_COUNT (i) + DF_REG_DEF_COUNT (i));
+ unsigned int num_used_regs = 0;
+ for (unsigned int i = FIRST_PSEUDO_REGISTER; i < DF_REG_SIZE (df); i++)
+ if (DF_REG_DEF_COUNT (i) || DF_REG_USE_COUNT (i))
+ num_used_regs++;
/* If there are too many pseudos and/or basic blocks (e.g. 10K
pseudos and 10K blocks or 100K pseudos and 1K blocks), we will
use simplified and faster algorithms in LRA. */
lra_simple_p
- = (ira_use_lra_p
- && num_used_regs >= (1 << 26) / last_basic_block_for_fn (cfun));
+ = ira_use_lra_p
+ && num_used_regs >= (1U << 26) / last_basic_block_for_fn (cfun);
if (lra_simple_p)
{
/* It permits to skip live range splitting in LRA. */
flag_caller_saves = false;
/* There is no sense to do regional allocation when we use
- simplified LRA. */
+ simplified LRA. */
flag_ira_region = IRA_REGION_ONE;
ira_conflicts_p = false;
}
init_alias_analysis ();
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
reg_equiv = XCNEWVEC (struct equivalence, max_reg_num ());
+ update_equiv_regs_prescan ();
update_equiv_regs ();
/* Don't move insns if live range shrinkage or register
{
ira_spilled_reg_stack_slots_num = 0;
ira_spilled_reg_stack_slots
- = ((struct ira_spilled_reg_stack_slot *)
+ = ((class ira_spilled_reg_stack_slot *)
ira_allocate (max_regno
- * sizeof (struct ira_spilled_reg_stack_slot)));
+ * sizeof (class ira_spilled_reg_stack_slot)));
memset ((void *)ira_spilled_reg_stack_slots, 0,
- max_regno * sizeof (struct ira_spilled_reg_stack_slot));
+ max_regno * sizeof (class ira_spilled_reg_stack_slot));
}
}
allocate_initial_values ();
poly_int64 size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (df_regs_ever_live_p (i) && !fixed_regs[i] && call_used_regs[i])
+ if (df_regs_ever_live_p (i)
+ && !fixed_regs[i]
+ && !crtl->abi->clobbers_full_reg_p (i))
size += UNITS_PER_WORD;
if (constant_lower_bound (size) > STACK_CHECK_MAX_FRAME_SIZE)