r <- ... or ... <- r              r <- ... or ... <- r
...                               s <- r  (new insn -- save)
- ...                =>
+ ...                =>
...                               r <- s  (new insn -- restore)
... <- r                          ... <- r
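For instance (an assumed illustration, not taken from the patch): if
pseudo r100 gets a hard register that a call in the middle of its live
range clobbers, the split saves the value into a new pseudo s before
the call and restores it afterwards:

r100 <- ...                       r100 <- ...
...                               s <- r100  (save)
call                        =>    call
...                               r100 <- s  (restore)
... <- r100                       ... <- r100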
enum machine_mode reg_mode;
int class_size, hard_regno, nregs, i, j;
int regno = REGNO (reg);
-
+
if (new_class != NULL)
*new_class = NO_REGS;
if (regno < FIRST_PSEUDO_REGISTER)
{
rtx final_reg = reg;
rtx *final_loc = &final_reg;
-
+
lra_eliminate_reg_if_possible (final_loc);
return TEST_HARD_REG_BIT (reg_class_contents[cl], REGNO (*final_loc));
}
ok_for_index_p_nonstrict (rtx reg)
{
unsigned regno = REGNO (reg);
-
+
return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
}
&& (REG_P (y) || (GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y)))))
{
int j;
-
+
i = get_hard_regno (x);
if (i < 0)
goto slow;
if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC
|| GET_CODE (y) == PRE_MODIFY)
return operands_match_p (x, XEXP (y, 0), -1);
-
+
slow:
if (code == REG && GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y))
pseudo, which is wrong when the input pseudo lives after the
insn, because the new pseudo's value is then changed by the insn's
output.  Therefore we create the new pseudo from the output.
-
+
We cannot reuse the current output register because we might
have a situation like "a <- a op b", where the constraints
force the second input operand ("b") to match the output
operand ("a"). "b" must then be copied into a new register
so that it doesn't clobber the current value of "a". */
-
+
new_in_reg = new_out_reg
= lra_create_new_reg_with_unique_value (outmode, out_rtx,
goal_class, "");
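As a sketch of the resulting reload sequence (an assumed illustration
continuing the "a <- a op b" example above, where "b" must match the
output "a"; "new" stands for the pseudo created by the call above):

new <- b          (input reload: copy the matched input)
new <- a op new   (the insn now uses the fresh pseudo in both slots)
a <- new          (output reload: copy the result back)

Because "new" is created from the output operand, the copy back to "a"
is trivially valid, and the original value of "a" survives until the
insn actually reads it.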
[op_class][base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
ADDRESS, SCRATCH)]);
break;
-
+
case 'g':
case 'r':
op_class = reg_class_subunion[op_class][GENERAL_REGS];
break;
-
+
default:
if (REG_CLASS_FROM_CONSTRAINT (c, p) == NO_REGS)
{
#endif
break;
}
-
+
op_class
= reg_class_subunion[op_class][REG_CLASS_FROM_CONSTRAINT (c, p)];
break;
if (GET_CODE (operand) != SUBREG)
return false;
-
+
mode = GET_MODE (operand);
reg = SUBREG_REG (operand);
/* If we change the address for a paradoxical subreg of memory, the
if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (mode))
mode = GET_MODE (x);
}
-
+
if (REG_P (x))
{
x_hard_regno = get_hard_regno (x);
op = no_subreg_reg_operand[nop] = *curr_id->operand_loc[nop];
/* The real hard regno of the operand after the allocation. */
hard_regno[nop] = get_hard_regno (op);
-
+
operand_reg[nop] = op;
biggest_mode[nop] = GET_MODE (operand_reg[nop]);
if (GET_CODE (operand_reg[nop]) == SUBREG)
curr_alt_matches[nop] = -1;
continue;
}
-
+
op = no_subreg_reg_operand[nop];
mode = curr_operand_mode[nop];
win = did_match = winreg = offmemok = constmemok = false;
badop = true;
-
+
early_clobber_p = false;
p = curr_static_id->operand_alternative[opalt_num].constraint;
-
+
this_costly_alternative = this_alternative = NO_REGS;
/* We update the set of possible hard regs besides its class
because the reg class might be inaccurate.  For example,
this_alternative_match_win = false;
this_alternative_offmemok = false;
this_alternative_matches = -1;
-
+
/* An empty constraint should be excluded by the fast
track. */
lra_assert (*p != 0 && *p != ',');
-
+
/* Scan this alternative's specs for this operand; set WIN
if the operand fits any letter in this alternative.
Otherwise, clear BADOP if this operand could fit some
case ',':
c = '\0';
break;
-
+
case '=': case '+': case '?': case '*': case '!':
case ' ': case '\t':
break;
-
+
case '%':
/* We only support one commutative marker, the first
one. We already set commutative above. */
break;
-
+
case '&':
early_clobber_p = true;
break;
-
+
case '#':
/* Ignore rest of this alternative. */
c = '\0';
break;
-
+
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
{
int m_hregno;
bool match_p;
-
+
m = strtoul (p, &end, 10);
p = end;
len = 0;
lra_assert (nop > m);
-
+
this_alternative_matches = m;
m_hregno = get_hard_regno (*curr_id->operand_loc[m]);
/* We are supposed to match a previous operand.
&& MEM_P (*curr_id->operand_loc[m])
&& curr_alt[m] == NO_REGS && ! curr_alt_win[m])
continue;
-
+
}
else
{
+= (ira_reg_class_max_nregs[curr_alt[m]]
[GET_MODE (*curr_id->operand_loc[m])]);
}
-
+
/* We prefer no matching alternatives because
it gives more freedom in RA. */
if (operand_reg[nop] == NULL_RTX
}
else
did_match = true;
-
+
/* This can be fixed with reloads if the operand
we are supposed to match can be fixed with
reloads. */
COPY_HARD_REG_SET (this_alternative_set, curr_alt_set[m]);
break;
}
-
+
case 'p':
cl = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
ADDRESS, SCRATCH);
win = true;
badop = false;
break;
-
+
case TARGET_MEM_CONSTRAINT:
if (MEM_P (op) || spilled_pseudo_p (op))
win = true;
badop = false;
constmemok = true;
break;
-
+
case '<':
if (MEM_P (op)
&& (GET_CODE (XEXP (op, 0)) == PRE_DEC
|| GET_CODE (XEXP (op, 0)) == POST_DEC))
win = true;
break;
-
+
case '>':
if (MEM_P (op)
&& (GET_CODE (XEXP (op, 0)) == PRE_INC
|| GET_CODE (XEXP (op, 0)) == POST_INC))
win = true;
break;
-
+
/* Memory op whose address is not offsettable. */
case 'V':
if (MEM_P (op)
&& ! offsettable_nonstrict_memref_p (op))
win = true;
break;
-
+
/* Memory operand whose address is offsettable. */
case 'o':
if ((MEM_P (op)
constmemok = true;
offmemok = true;
break;
-
+
case 'E':
case 'F':
if (GET_CODE (op) == CONST_DOUBLE
&& (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)))
win = true;
break;
-
+
case 'G':
case 'H':
if (GET_CODE (op) == CONST_DOUBLE
&& CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, c, p))
win = true;
break;
-
+
case 's':
if (CONST_INT_P (op)
|| (GET_CODE (op) == CONST_DOUBLE && mode == VOIDmode))
if (general_constant_p (op))
win = true;
break;
-
+
case 'n':
if (CONST_INT_P (op)
|| (GET_CODE (op) == CONST_DOUBLE && mode == VOIDmode))
win = true;
break;
-
+
case 'I':
case 'J':
case 'K':
&& CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), c, p))
win = true;
break;
-
+
case 'X':
/* This constraint should be excluded by the fast
track. */
gcc_unreachable ();
break;
-
+
case 'g':
if (MEM_P (op)
|| general_constant_p (op)
|| spilled_pseudo_p (op))
win = true;
/* Drop through into 'r' case. */
-
+
case 'r':
this_alternative
= reg_class_subunion[this_alternative][GENERAL_REGS];
reg_class_contents[GENERAL_REGS]);
}
goto reg;
-
+
default:
if (REG_CLASS_FROM_CONSTRAINT (c, p) == NO_REGS)
{
win = true;
else if (spilled_pseudo_p (op))
win = true;
-
+
/* If we didn't already win, we can reload
constants via force_const_mem, and other
MEMs by reloading the address like for
{
if (EXTRA_CONSTRAINT_STR (op, c, p))
win = true;
-
+
/* If we didn't already win, we can reload
the address into a base register. */
cl = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
badop = false;
break;
}
-
+
if (EXTRA_CONSTRAINT_STR (op, c, p))
win = true;
#endif
break;
}
-
+
cl = REG_CLASS_FROM_CONSTRAINT (c, p);
this_alternative = reg_class_subunion[this_alternative][cl];
IOR_HARD_REG_SET (this_alternative_set,
costly_p = c == '*';
}
while ((p += len), c);
-
+
/* Record which operands fit this alternative. */
if (win)
{
then this operand can be reloaded. */
if (winreg && !no_regs_p)
badop = false;
-
+
if (badop)
goto fail;
if (! no_regs_p)
losers++;
}
-
+
/* Alternative loses if it requires a type of reload not
permitted for this insn. We can always reload
objects with a REG_UNUSED note. */
|| (curr_static_id->operand[nop].type != OP_OUT
&& no_input_reloads_p && ! const_to_mem))
goto fail;
-
+
/* If we can't reload this value at all, reject this
alternative. Note that we could also lose due to
LIMIT_RELOAD_CLASS, but we don't check that here. */
if (targetm.preferred_reload_class
(op, this_alternative) == NO_REGS)
reject = MAX_OVERALL_COST_BOUND;
-
+
if (curr_static_id->operand[nop].type == OP_OUT
&& (targetm.preferred_output_reload_class
(op, this_alternative) == NO_REGS))
reject = MAX_OVERALL_COST_BOUND;
}
-
+
if (! ((const_to_mem && constmemok)
|| (MEM_P (op) && offmemok)))
{
match then. */
if (! (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER))
reject += 2;
-
+
if (! no_regs_p)
reload_nregs
+= ira_reg_class_max_nregs[this_alternative][mode];
if (!REG_P (op) || curr_static_id->operand[nop].type != OP_IN)
reject++;
}
-
+
if (early_clobber_p)
reject++;
/* ??? We check early clobbers after processing all operands
curr_alt_match_win[nop] = this_alternative_match_win;
curr_alt_offmemok[nop] = this_alternative_offmemok;
curr_alt_matches[nop] = this_alternative_matches;
-
+
if (this_alternative_matches >= 0
&& !did_match && !this_alternative_win)
curr_alt_win[this_alternative_matches] = false;
-
+
if (early_clobber_p && operand_reg[nop] != NULL_RTX)
early_clobbered_nops[early_clobbered_regs_num++] = nop;
}
}
if (losers == 0)
/* Everything is satisfied. Do not process alternatives
- anymore. */
+ anymore. */
break;
fail:
;
lra_assert (ADDR_SPACE_GENERIC_P (as));
GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
return 0;
-
+
win:
return 1;
#else
change_p = true;
lra_update_dup (curr_id, i);
}
-
+
if (change_p)
/* If we've changed the instruction then any alternative that
we chose previously may no longer be valid. */
/* Right now, for any pair of operands I and J that are required to
match, with J < I, goal_alt_matches[I] is J. Add I to
goal_alt_matched[J]. */
-
+
for (i = 0; i < n_operands; i++)
if ((j = goal_alt_matches[i]) >= 0)
{
goal_alt_matched[j][k] = i;
goal_alt_matched[j][k + 1] = -1;
}
-
+
for (i = 0; i < n_operands; i++)
goal_alt_win[i] |= goal_alt_match_win[i];
-
+
/* Any constants that aren't allowed and can't be reloaded into
registers are here changed into memory references. */
for (i = 0; i < n_operands; i++)
if (GET_CODE (reg) == SUBREG)
reg = SUBREG_REG (reg);
-
+
if (REG_P (reg) && (regno = REGNO (reg)) >= FIRST_PSEUDO_REGISTER)
{
bool ok_p = in_class_p (reg, goal_alt[i], &new_class);
rtx op = *curr_id->operand_loc[i];
rtx subreg = NULL_RTX;
enum machine_mode mode = curr_operand_mode[i];
-
+
if (GET_CODE (op) == SUBREG)
{
subreg = op;
op = SUBREG_REG (op);
mode = GET_MODE (op);
}
-
+
if (CONST_POOL_OK_P (mode, op)
&& ((targetm.preferred_reload_class
(op, (enum reg_class) goal_alt[i]) == NO_REGS)
|| no_input_reloads_p))
{
rtx tem = force_const_mem (mode, op);
-
+
change_p = true;
if (subreg != NULL_RTX)
tem = gen_rtx_SUBREG (mode, tem, SUBREG_BYTE (subreg));
-
+
*curr_id->operand_loc[i] = tem;
lra_update_dup (curr_id, i);
process_address (i, &before, &after);
-
+
/* If the alternative accepts constant pool refs directly
there will be no reload needed at all. */
if (subreg != NULL_RTX)
}
if (c == '\0' || c == ',' || c == '#')
continue;
-
+
goal_alt_win[i] = true;
}
}
-
+
for (i = 0; i < n_operands; i++)
{
rtx old, new_reg;
change_class (REGNO (op), NO_REGS, " Change", true);
continue;
}
-
+
/* Operands that match previous ones have already been handled. */
if (goal_alt_matches[i] >= 0)
continue;
basic_block bb = NULL;
unsigned int uid;
bitmap_iterator bi;
-
+
if (regno < FIRST_PSEUDO_REGISTER)
return false;
-
+
EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi)
if (bb == NULL)
bb = BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn);
if ((hard_regno = lra_get_regno_hard_regno (i)) >= 0)
{
int j, nregs = hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (i)];
-
+
for (j = 0; j < nregs; j++)
df_set_regs_ever_live (hard_regno + j, true);
}
{
curr_insn = lra_pop_insn ();
--new_min_len;
- curr_bb = BLOCK_FOR_INSN (curr_insn);
+ curr_bb = BLOCK_FOR_INSN (curr_insn);
if (curr_bb != last_bb)
{
last_bb = curr_bb;
cannot be changed.  Such insns might not be in
init_insns because we don't update equiv data
during insn transformations.
-
+
As an example, let us suppose that a pseudo got a
hard register and on the 1st pass was not changed
to an equivalent constant.  We generate an
&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
{
int j, nregs = hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (i)];
-
+
for (j = 0; j < nregs; j++)
lra_assert (df_regs_ever_live_p (hard_regno + j));
}
add_next_usage_insn (int regno, rtx insn, int reloads_num)
{
rtx next_usage_insns;
-
+
if (usage_insns[regno].check == curr_usage_insns_check
&& (next_usage_insns = usage_insns[regno].insns) != NULL_RTX
&& DEBUG_INSN_P (insn))
else
usage_insns[regno].check = 0;
}
-
+
/* Replace all references to register OLD_REGNO in *LOC with pseudo
register NEW_REG. Return true if any change was made. */
static bool
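A minimal sketch of how such a substitution can walk the RTL (assumed,
following the usual GET_RTX_FORMAT recursion; the name is illustrative
and the real function must also cope with shared RTL):

static bool
substitute_pseudo_sketch (rtx *loc, int old_regno, rtx new_reg)
{
  rtx x = *loc;
  bool result = false;
  const char *fmt;
  int i, j;

  if (x == NULL_RTX)
    return false;
  /* Replace a direct reference to OLD_REGNO.  */
  if (REG_P (x) && (int) REGNO (x) == old_regno)
    {
      *loc = new_reg;
      return true;
    }
  /* Otherwise recurse into all rtx and rtvec operands.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      result |= substitute_pseudo_sketch (&XEXP (x, i), old_regno, new_reg);
    else if (fmt[i] == 'E')
      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
        result |= substitute_pseudo_sketch (&XVECEXP (x, i, j),
                                            old_regno, new_reg);
  return result;
}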
...                               =>
<- ... p ...                      <- ... i ...
where p is a spilled original pseudo and i is a new inheritance pseudo.
-
-
+
+
The inheritance pseudo is given the smaller of the two classes: CL and
the class of ORIGINAL REGNO.  */
static bool
if (lra_dump_file != NULL)
fprintf (lra_dump_file, " Use smallest class of %s and %s\n",
reg_class_names[cl], reg_class_names[rclass]);
-
+
rclass = cl;
}
if (check_secondary_memory_needed_p (cl, next_usage_insns))
int i;
enum reg_class cl, best_cl = NO_REGS;
enum reg_class hard_reg_class = REGNO_REG_CLASS (hard_regno);
-
+
if (! SECONDARY_MEMORY_NEEDED (allocno_class, allocno_class, mode)
&& TEST_HARD_REG_BIT (reg_class_contents[allocno_class], hard_regno))
return allocno_class;
if (call_save_p)
{
enum machine_mode sec_mode;
-
+
#ifdef SECONDARY_MEMORY_NEEDED_MODE
sec_mode = SECONDARY_MEMORY_NEEDED_MODE (GET_MODE (original_reg));
#else
edge e;
edge_iterator ei;
- last_bb = BLOCK_FOR_INSN (tail);
+ last_bb = BLOCK_FOR_INSN (tail);
prev_bb = NULL;
for (curr_insn = tail;
curr_insn != PREV_INSN (head);
if (reg->type != OP_IN)
bitmap_set_bit (res, reg->regno);
}
-
+
/* Used to hold temporary results of some bitmap calculations.  */
static bitmap_head temp_bitmap;
pseudos for potential splitting. */
to_process = df_get_live_out (curr_bb);
if (last_processed_bb != NULL)
- {
+ {
/* We are somewhere in the middle of EBB. */
get_live_on_other_edges (curr_bb, last_processed_bb,
&temp_bitmap);
= usage_insns[dst_regno].insns) != NULL_RTX)
{
struct lra_insn_reg *r;
-
+
for (r = curr_id->regs; r != NULL; r = r->next)
if (r->type != OP_OUT && r->regno == dst_regno)
break;
&& ! reg->subreg_p && reg->type == OP_OUT)
{
HARD_REG_SET s;
-
+
if (split_if_necessary (dst_regno, reg->biggest_mode,
potential_reload_hard_regs,
false, curr_insn, max_uid))
rest of splitting in the current BB.  */
to_process = df_get_live_in (curr_bb);
if (BLOCK_FOR_INSN (head) != curr_bb)
- {
+ {
/* We are somewhere in the middle of EBB. */
get_live_on_other_edges (EDGE_PRED (curr_bb, 0)->src,
curr_bb, &temp_bitmap);
/* This value affects EBB forming.  If the probability of an edge from an
EBB to a BB is not greater than the following value, we don't add the BB
- to EBB. */
+ to EBB. */
#define EBB_PROBABILITY_CUTOFF (REG_BR_PROB_BASE / 2)
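Since REG_BR_PROB_BASE is 10000, this cutoff is 5000, i.e. an edge
probability of 50%: a BB joins the EBB only when the edge leading to it
is taken more than half the time.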
/* Current number of inheritance/split iteration. */
dregno = get_regno (SET_DEST (set));
sregno = get_regno (SET_SRC (set));
}
-
+
if (sregno >= 0 && dregno >= 0)
{
if ((bitmap_bit_p (remove_pseudos, sregno)
struct elim_table
{
/* Hard register number to be eliminated. */
- int from;
+ int from;
/* Hard register number used as replacement. */
- int to;
+ int to;
/* Difference between values of the two hard registers above on
previous iteration. */
HOST_WIDE_INT previous_offset;
/* Difference between the values on the current iteration. */
- HOST_WIDE_INT offset;
+ HOST_WIDE_INT offset;
/* Nonzero if this elimination can be done. */
- bool can_eliminate;
+ bool can_eliminate;
/* CAN_ELIMINATE since the last check. */
bool prev_can_eliminate;
/* REG rtx for the register to be eliminated. We cannot simply
compare the number since we might then spuriously replace a hard
register corresponding to a pseudo assigned to the reg to be
eliminated. */
- rtx from_rtx;
+ rtx from_rtx;
/* REG rtx for the replacement. */
- rtx to_rtx;
+ rtx to_rtx;
};
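For context, the FROM/TO candidate pairs come from the target's
ELIMINABLE_REGS macro; a typical definition looks like this
(illustrative only -- the offset fields of elim_table are computed by
the pass itself):

#define ELIMINABLE_REGS                                 \
  {                                                     \
    { ARG_POINTER_REGNUM,   STACK_POINTER_REGNUM },     \
    { ARG_POINTER_REGNUM,   FRAME_POINTER_REGNUM },     \
    { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM },     \
  }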
/* The elimination table. Each array entry describes one possible way
if ((ep = get_elimination (x)) != NULL)
{
rtx to = subst_p ? ep->to_rtx : ep->from_rtx;
-
+
if (update_p)
return plus_constant (Pmode, to, ep->offset - ep->previous_offset);
else if (full_p)
{
HOST_WIDE_INT offset;
rtx to = subst_p ? ep->to_rtx : ep->from_rtx;
-
+
if (! update_p && ! full_p)
return gen_rtx_PLUS (Pmode, to, XEXP (x, 1));
-
+
offset = (update_p
? ep->offset - ep->previous_offset : ep->offset);
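As a made-up example of this arithmetic: when fully eliminating the
frame pointer to the stack pointer with ep->offset == 16,
(plus (reg fp) (const_int 8)) is rewritten to
(plus (reg sp) (const_int 24)); with update_p, only the delta against
previous_offset is applied instead.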
if (CONST_INT_P (XEXP (x, 1))
&& (ep = get_elimination (XEXP (x, 0))) != NULL)
{
rtx to = subst_p ? ep->to_rtx : ep->from_rtx;
-
+
if (update_p)
return
plus_constant (Pmode,
else
return gen_rtx_MULT (Pmode, to, XEXP (x, 1));
}
-
+
/* ... fall through ... */
case CALL:
&& (ep = get_elimination (SET_DEST (old_set))) != NULL)
{
bool delete_p = replace_p;
-
+
#ifdef HARD_FRAME_POINTER_REGNUM
/* If this is setting the frame pointer register to the hardware
frame pointer register and this is an elimination that will
rtx base = SET_SRC (old_set);
HOST_WIDE_INT offset = 0;
rtx base_insn = insn;
-
+
while (base != ep->to_rtx)
{
rtx prev_insn, prev_set;
-
+
if (GET_CODE (base) == PLUS && CONST_INT_P (XEXP (base, 1)))
{
offset += INTVAL (XEXP (base, 1));
else
break;
}
-
+
if (base == ep->to_rtx)
{
rtx src;
-
+
offset -= (ep->offset - ep->previous_offset);
src = plus_constant (Pmode, ep->to_rtx, offset);
-
+
/* First see if this insn remains valid when we make
the change. If not, keep the INSN_CODE the same
and let the constraint pass fix it up.  */
return;
}
}
-
-
+
+
/* We can't delete this insn, but needn't process it
since it won't be used unless something changes. */
delete_p = false;
}
#endif
-
+
/* This insn isn't serving a useful purpose. We delete it
when REPLACE is set. */
if (delete_p)
if (REG_P (reg) && (ep = get_elimination (reg)) != NULL)
{
rtx to_rtx = replace_p ? ep->to_rtx : ep->from_rtx;
-
+
if (! replace_p)
{
offset += (ep->offset - ep->previous_offset);
offset = trunc_int_for_mode (offset, GET_MODE (plus_cst_src));
}
-
+
if (GET_CODE (XEXP (plus_cst_src, 0)) == SUBREG)
to_rtx = gen_lowpart (GET_MODE (XEXP (plus_cst_src, 0)), to_rtx);
/* If we have a nonzero offset, and the source is already a
if (offset == 0 || plus_src)
{
rtx new_src = plus_constant (GET_MODE (to_rtx), to_rtx, offset);
-
+
old_set = single_set (insn);
/* First see if this insn remains valid when we make the
{
rtx new_pat = gen_rtx_SET (VOIDmode,
SET_DEST (old_set), new_src);
-
+
if (! validate_change (insn, &PATTERN (insn), new_pat, 0))
SET_SRC (old_set) = new_src;
}
ep->to = ep1->to;
value_p = (targetm.can_eliminate (ep->from, ep->to)
&& ! (ep->to == STACK_POINTER_REGNUM
- && frame_pointer_needed
+ && frame_pointer_needed
&& (! SUPPORTS_STACK_ALIGNMENT
|| ! stack_realign_fp)));
setup_can_eliminate (ep, value_p);
Here is block diagram of LRA passes:
- ---------------------
- | Undo inheritance | --------------- ---------------
+ ---------------------
+ | Undo inheritance | --------------- ---------------
| for spilled pseudos)| | Memory-memory | | New (and old) |
| and splits (for |<----| move coalesce |<-----| pseudos |
| pseudos got the | --------------- | assignment |
- Start | same hard regs) | ---------------
+ Start | same hard regs) | ---------------
| --------------------- ^
V | ---------------- |
----------- V | Update virtual | |
| to memory |<-------| RTL |--------->| transformations |
| substitution | | transfor- | | in EBB scope |
---------------- | mations | -------------------
- | ------------
+ | ------------
V
-------------------------
| Hard regs substitution, |
break;
case CLOBBER:
/* We treat a clobber of non-operand hard registers as an early
- clobber (the behavior is expected from asm). */
+ clobber (the behavior is expected from asm). */
list = collect_non_operand_hard_regs (&XEXP (op, 0), data,
list, OP_OUT, true);
break;
if (nop > 0)
{
const char *p = recog_data.constraints[0];
-
+
for (; *p; p++)
n += *p == ',';
}
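For example, the constraint string "r,m,&r" contains two commas, so the
loop adds 2; assuming n starts at 1 (there is one more alternative than
there are commas), the insn ends up with three alternatives.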
int n;
unsigned int uid = INSN_UID (insn);
struct lra_static_insn_data *insn_static_data;
-
+
check_and_expand_insn_recog_data (uid);
if ((data = lra_insn_recog_data[uid]) != NULL
&& data->icode != INSN_CODE (insn))
{
int i;
bool *bp;
-
+
n = insn_static_data->n_alternatives;
bp = data->alternative_enabled_p;
lra_assert (n >= 0 && bp != NULL);
break;
case CLOBBER:
/* We treat a clobber of non-operand hard registers as an early
- clobber (the behavior is expected from asm). */
+ clobber (the behavior is expected from asm). */
add_regs_to_insn_regno_info (data, XEXP (x, 0), uid, OP_OUT, true);
break;
case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
for (i = 0; i < id->insn_static_data->n_operands; i++)
{
rtx op = *id->operand_loc[i];
-
+
if (MEM_P (op)
&& (GET_MODE (op) != BLKmode
|| GET_CODE (XEXP (op, 0)) != SCRATCH)
/* If we're not optimizing, then just err on the safe side. */
if (!optimize)
return true;
-
+
/* First determine which blocks can reach exit via normal paths. */
tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
/* Place the exit block on our worklist. */
EXIT_BLOCK_PTR->flags |= BB_REACHABLE;
*tos++ = EXIT_BLOCK_PTR;
-
+
/* Iterate: find everything reachable from what we've already seen. */
while (tos != worklist)
{
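The rest of the loop presumably follows the standard
backward-reachability pattern; a minimal sketch (assumed, not the
patch's exact code):

      basic_block bb = *--tos;  /* Pop the next reachable block.  */
      edge e;
      edge_iterator ei;

      /* Every predecessor of a block that reaches exit also reaches
         exit, so mark and queue the ones not yet seen.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (!(e->src->flags & BB_REACHABLE))
          {
            e->src->flags |= BB_REACHABLE;
            *tos++ = e->src;
          }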
/* Set to 1 while in lra. */
int lra_in_progress;
-/* Start of reload pseudo regnos before the new spill pass. */
+/* Start of reload pseudo regnos before the new spill pass. */
int lra_constraint_new_regno_start;
-/* Inheritance pseudo regnos before the new spill pass. */
+/* Inheritance pseudo regnos before the new spill pass. */
bitmap_head lra_inheritance_pseudos;
-/* Split regnos before the new spill pass. */
+/* Split regnos before the new spill pass. */
bitmap_head lra_split_regs;
/* Reload pseudo regnos before the new assign pass which can still be
- spilled after the assignment pass.  */
+ spilled after the assignment pass.  */
bitmap_head lra_optional_reload_pseudos;
/* First UID of insns generated before a new spill pass. */
else
{
/* Do coalescing only for regular algorithms. */
- if (! lra_assign () && lra_coalesce ())
+ if (! lra_assign () && lra_coalesce ())
live_p = false;
if (lra_undo_inheritance ())
live_p = false;