+2002-06-24 Jeff Law <law@redhat.com>
+
+ * flow.c (propagate_one_insn): When removing an insn
+ with a REG_LIBCALL note but not the entire libcall sequence,
+ delete the associated REG_RETVAL note.
+
+2002-06-04 Jeff Law <law@redhat.com>
+ David Edelsohn <edelsohn@gnu.org>
+ Michael Matz <matz@kde.org>
+
+ * sched-int.h (struct deps): New field libcall_block_tail_insn.
+ * sched-deps.c (init_deps): Initialize libcall_block_tail_insn.
+ (sched_analyze_insn): If libcall_block_tail_insn
+ is set, then mark the current insn as being part of a libcall
+ scheduling group.
+ (sched_analyze): Set and clear libcall_block_tail_insn appropriately.
+
+ * haifa-sched.c (schedule_block): Do not count USE or CLOBBER
+ insns against the issue rate.
+
+2002-05-30 Jeff Law <law@redhat.com>
+
+ * flow.c (propagate_one_insn): Revise yesterday's patch. Delete
+ a dead insn with a REG_RETVAL note when the entire libcall is not
+ dead and remove the associated REG_LIBCALL note at the same time.
+
+2002-05-29 Jeff Law <law@redhat.com>
+
+ * flow.c (propagate_one_insn): Do not remove a dead insn if it
+ contains a REG_RETVAL note.
+
+ * sched-deps.c (sched_analyze): Remove another useless clearing
+ of SCHED_GROUP_P I missed yesterday.
+
+2002-05-28 David Edelsohn <edelsohn@gnu.org>
+ Jeff Law <law@redhat.com>
+
+ * optabs.c (expand_binop): Fix nwords sign warnings.
+ Generate a pseudo for add_optab.
+
+ * sched-deps.c (sched_analyze): Do not clear SCHED_GROUP_P.
+ * haifa-sched.c (move_insn): Clear SCHED_GROUP_P after it is used.
+
2002-08-18 Neil Booth <neil@daikokuya.co.uk>
PR preprocessor/7602
if (libcall_is_dead)
prev = propagate_block_delete_libcall (insn, note);
else
- propagate_block_delete_insn (pbi->bb, insn);
+ {
+ /* If INSN contains a REG_RETVAL note and is dead, but the libcall
+ as a whole is not dead, then we want to remove INSN, but
+ not the whole libcall sequence.
+
+ However, we need to also remove the dangling REG_LIBCALL
+ note so that we do not have mis-matched LIBCALL/RETVAL
+ notes. In theory we could find a new location for the
+ REG_RETVAL note, but it hardly seems worth the effort.
+
+ NOTE at this point will be the REG_RETVAL note if it exists. */
+ if (note)
+ {
+ rtx libcall_note;
+
+ libcall_note
+ = find_reg_note (XEXP (note, 0), REG_LIBCALL, NULL_RTX);
+ remove_note (XEXP (note, 0), libcall_note);
+ }
+
+ /* Similarly, if INSN contains a REG_LIBCALL note, remove the
+ dangling REG_RETVAL note. */
+ note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
+ if (note)
+ {
+ rtx retval_note;
+
+ retval_note
+ = find_reg_note (XEXP (note, 0), REG_RETVAL, NULL_RTX);
+ remove_note (XEXP (note, 0), retval_note);
+ }
+
+ /* Now delete INSN. */
+ propagate_block_delete_insn (pbi->bb, insn);
+ }
return prev;
}
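
For readers following the note bookkeeping, here is a minimal standalone model of the invariant the hunk above maintains. The struct and function names are invented for illustration; real insns carry these links as REG_LIBCALL/REG_RETVAL notes manipulated via find_reg_note and remove_note. The point is that deleting one end of a libcall block must also strip the partner note on the other end:

/* Minimal standalone model of the LIBCALL/RETVAL pairing (invented
   types; real insns carry these links as REG_NOTES).  The head of a
   libcall block has a REG_LIBCALL note pointing at the tail; the
   tail has a REG_RETVAL note pointing back at the head.  */
#include <stdio.h>

struct insn
{
  const char *name;
  struct insn *libcall_note;	/* on the head: points to the tail */
  struct insn *retval_note;	/* on the tail: points to the head */
};

/* Delete the tail of a libcall block without deleting the block:
   also remove the head's REG_LIBCALL note, as in the hunk above,
   so no dangling half of the pair survives.  */
static void
delete_block_tail (struct insn *tail)
{
  if (tail->retval_note)
    tail->retval_note->libcall_note = NULL;
  tail->retval_note = NULL;
}

int
main (void)
{
  struct insn head = { "head", NULL, NULL };
  struct insn tail = { "tail", NULL, NULL };
  head.libcall_note = &tail;
  tail.retval_note = &head;

  delete_block_tail (&tail);
  printf ("head note: %s\n", head.libcall_note ? "dangling!" : "removed");
  return 0;
}
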
retval = reemit_notes (insn, insn);
else
reemit_notes (insn, insn);
+ /* Consume SCHED_GROUP_P flag. */
+ SCHED_GROUP_P (insn) = 0;
insn = prev;
}
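
The flag clearing above matters because SCHED_GROUP_P only glues an insn to its predecessor for the current pass; a stale flag would confuse a later scheduling run. A toy version of the backwards walk, with an invented struct standing in for the rtx insn chain:

/* Toy model of move_insn's backwards walk: emit the insn plus every
   predecessor glued to it by SCHED_GROUP_P, clearing each flag as it
   is consumed.  */
#include <stdio.h>

struct insn { int uid; int sched_group_p; struct insn *prev; };

int
main (void)
{
  struct insn i0 = { 0, 0, NULL };
  struct insn i1 = { 1, 1, &i0 };	/* glued to i0 */
  struct insn i2 = { 2, 1, &i1 };	/* glued to i1 */
  struct insn *insn = &i2;

  for (;;)
    {
      struct insn *prev = insn->prev;
      int grouped = insn->sched_group_p;

      printf ("emit insn %d\n", insn->uid);
      insn->sched_group_p = 0;	/* consume the flag */
      if (!grouped)
	break;
      insn = prev;		/* the group extends backwards */
    }
  return 0;
}
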
can_issue_more =
(*targetm.sched.variable_issue) (sched_dump, sched_verbose,
insn, can_issue_more);
- else
+ /* A naked CLOBBER or USE generates no instruction, so do
+ not count them against the issue rate. */
+ else if (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
can_issue_more--;
schedule_insn (insn, &ready, clock_var);
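
The effect of the new test is easy to check in miniature. In this plain-C sketch (the enum and loop are invented; the scheduler's real ready list holds rtx insns), a 2-issue budget now lets one cycle absorb any number of USE/CLOBBER markers:

/* Toy model of the issue-rate change: USE and CLOBBER patterns emit
   no machine instruction, so they cost no issue slot.  */
#include <stdio.h>

enum pattern_kind { PAT_SET, PAT_USE, PAT_CLOBBER };

int
main (void)
{
  enum pattern_kind ready[] = { PAT_USE, PAT_SET, PAT_CLOBBER, PAT_SET };
  int can_issue_more = 2;	/* pretend this is a 2-issue machine */
  unsigned i;

  for (i = 0; i < sizeof ready / sizeof ready[0] && can_issue_more > 0; i++)
    {
      if (ready[i] != PAT_USE && ready[i] != PAT_CLOBBER)
	can_issue_more--;	/* only real insns consume a slot */
      printf ("issued insn %u, %d slots left this cycle\n",
	      i, can_issue_more);
    }
  return 0;
}

All four insns issue in the same cycle, because only the two SETs are charged against the budget.
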
{
int i;
optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
- unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
+ int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
- rtx xop0, xop1;
+ rtx xop0, xop1, xtarget;
/* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
value is one of those, use it. Otherwise, use 1 since it is the
xop0 = force_reg (mode, op0);
xop1 = force_reg (mode, op1);
- if (target == 0 || GET_CODE (target) != REG
- || target == xop0 || target == xop1)
- target = gen_reg_rtx (mode);
+ xtarget = gen_reg_rtx (mode);
+
+ if (target == 0 || GET_CODE (target) != REG)
+ target = xtarget;
- /* Indicate for flow that the entire target reg is being set. */
+ /* Indicate for flow that the entire xtarget reg is being set. */
if (GET_CODE (target) == REG)
- emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
/* Do the actual arithmetic. */
for (i = 0; i < nwords; i++)
{
int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
- rtx target_piece = operand_subword (target, index, 1, mode);
+ rtx target_piece = operand_subword (xtarget, index, 1, mode);
rtx op0_piece = operand_subword_force (xop0, index, mode);
rtx op1_piece = operand_subword_force (xop1, index, mode);
rtx x;
{
if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
{
- rtx temp = emit_move_insn (target, target);
+ rtx temp = emit_move_insn (target, xtarget);
set_unique_reg_note (temp,
REG_EQUAL,
rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
NULL_RTX, 0, OPTAB_DIRECT);
+ if (!REG_P (product_high))
+ product_high = force_reg (word_mode, product_high);
+
if (temp != 0)
temp = expand_binop (word_mode, add_optab, temp, product_high,
product_high, 0, next_methods);
if (temp != 0 && temp != product_high)
emit_move_insn (product_high, temp);
+ emit_move_insn (operand_subword (product, high, 1, mode), product_high);
+
if (temp != 0)
{
if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
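
The xtarget change in the hunks above is easiest to see in miniature: when the destination may alias an input, a multiword operation must accumulate its result in a scratch and copy it out at the end. Below is a plain-C stand-in with invented names; GCC does this at the rtx level with the fresh pseudo:

/* Build the multiword sum in a scratch (the role of xtarget), then
   move it to the real destination, so storing result words can never
   clobber an input that aliases the target.  */
#include <stdio.h>
#include <string.h>

#define NWORDS 4

static void
multiword_add (unsigned *dst, const unsigned *a, const unsigned *b)
{
  unsigned scratch[NWORDS];	/* plays the role of xtarget */
  unsigned carry = 0;
  int i;

  for (i = 0; i < NWORDS; i++)
    {
      unsigned sum = a[i] + b[i];
      unsigned cout = sum < a[i];	/* unsigned wraparound => carry */
      sum += carry;
      cout |= sum < carry;
      scratch[i] = sum;
      carry = cout;
    }
  memcpy (dst, scratch, sizeof scratch);	/* final move into target */
}

int
main (void)
{
  unsigned a[NWORDS] = { 0xffffffffu, 0xffffffffu, 0, 0 };
  unsigned b[NWORDS] = { 1, 0, 0, 0 };

  multiword_add (a, a, b);	/* dst aliases an input, as target may */
  printf ("%08x %08x %08x %08x\n", a[3], a[2], a[1], a[0]);
  return 0;
}
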
CLEAR_REG_SET (reg_pending_clobbers);
CLEAR_REG_SET (reg_pending_sets);
+ /* If we are currently in a libcall scheduling group, then mark the
+ current insn as being in a scheduling group and prevent it from
+ being moved into a different basic block. */
+
+ if (deps->libcall_block_tail_insn)
+ {
+ set_sched_group_p (insn);
+ CANT_MOVE (insn) = 1;
+ }
+
/* If a post-call group is still open, see if it should remain so.
This insn must be a simple move of a hard reg to a pseudo or
vice-versa.
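
A compact model of the grouping state these two functions share (plain C with invented types; the real flags are SCHED_GROUP_P and CANT_MOVE on insns): while libcall_block_tail_insn is pending, every insn analyzed, here standing for the insns that follow the block head, joins the group, and analyzing the tail itself closes it:

#include <stdio.h>

struct insn { int uid; int sched_group_p; int cant_move; };

static struct insn *libcall_block_tail_insn;	/* the new deps field */

static void
analyze_one (struct insn *insn)
{
  if (libcall_block_tail_insn)
    {
      insn->sched_group_p = 1;	/* schedule with predecessor */
      insn->cant_move = 1;	/* never across a block boundary */
    }
  if (insn == libcall_block_tail_insn)
    libcall_block_tail_insn = NULL;	/* tail reached: close group */
}

int
main (void)
{
  struct insn seq[4] = { { 0, 0, 0 }, { 1, 0, 0 }, { 2, 0, 0 }, { 3, 0, 0 } };
  int i;

  libcall_block_tail_insn = &seq[2];	/* pretend insn 2 ends a libcall */
  for (i = 0; i < 4; i++)
    {
      analyze_one (&seq[i]);
      printf ("insn %d: group=%d\n", seq[i].uid, seq[i].sched_group_p);
    }
  return 0;
}
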
for (insn = head;; insn = NEXT_INSN (insn))
{
+ rtx link, end_seq, r0, set, note;
+
if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
{
/* Clear out the stale LOG_LINKS from flow. */
{
int i;
- /* Clear out stale SCHED_GROUP_P. */
- SCHED_GROUP_P (insn) = 0;
-
CANT_MOVE (insn) = 1;
/* Clear out the stale LOG_LINKS from flow. */
if (current_sched_info->use_cselib)
cselib_process_insn (insn);
+
+ /* Now that we have completed handling INSN, check and see if it is
+ a CLOBBER beginning a libcall block. If it is, record the
+ end of the libcall sequence.
+
+ We want to schedule libcall blocks as a unit before reload. While
+ this restricts scheduling, it preserves the meaning of a libcall
+ block.
+
+ As a side effect, we may get better code due to decreased register
+ pressure as well as less chance of a foreign insn appearing in
+ a libcall block. */
+ if (!reload_completed
+ /* Note we may have nested libcall sequences. We only care about
+ the outermost libcall sequence. */
+ && deps->libcall_block_tail_insn == 0
+ /* The sequence must start with a clobber of a register. */
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == CLOBBER
+ && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
+ /* The CLOBBER must also have a REG_LIBCALL note attached. */
+ && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
+ && (end_seq = XEXP (link, 0)) != 0
+ /* The insn referenced by the REG_LIBCALL note must be a
+ simple nop copy with the same destination as the register
+ mentioned in the clobber. */
+ && (set = single_set (end_seq)) != 0
+ && SET_DEST (set) == r0 && SET_SRC (set) == r0
+ /* And finally the insn referenced by the REG_LIBCALL note must
+ also contain a REG_EQUAL note and a REG_RETVAL note. */
+ && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
+ && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
+ deps->libcall_block_tail_insn = XEXP (link, 0);
+
+ /* If we have reached the end of a libcall block, then close the
+ block. */
+ if (deps->libcall_block_tail_insn == insn)
+ deps->libcall_block_tail_insn = 0;
+
if (insn == tail)
{
if (current_sched_info->use_cselib)
deps->last_function_call = 0;
deps->sched_before_next_call = 0;
deps->in_post_call_group_p = false;
+ deps->libcall_block_tail_insn = 0;
}
/* Free insn lists found in DEPS. */
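
The long conjunction in sched_analyze above is really a shape test on the block head. Restated as a standalone predicate, with all types and fields invented for illustration; the real code inspects PATTERN, single_set and the REG_* notes:

/* A block head is a (clobber (reg R)) whose REG_LIBCALL note points
   at a nop copy of R carrying REG_EQUAL and REG_RETVAL notes.  */
#include <stdio.h>

struct insn
{
  int clobbers_reg;		/* insn is (clobber (reg R)) */
  int reg;			/* R, or -1 */
  struct insn *libcall_note;	/* REG_LIBCALL target, or NULL */
  int nop_copy_reg;		/* R' for (set (reg R') (reg R')), or -1 */
  int has_equal_note, has_retval_note;
};

/* Return the tail insn when INSN opens an outermost libcall block,
   else NULL.  */
static struct insn *
libcall_block_tail (struct insn *insn, struct insn *open_tail,
		    int reload_completed)
{
  struct insn *end_seq;

  if (reload_completed || open_tail)	/* outermost, pre-reload only */
    return NULL;
  if (!insn->clobbers_reg)		/* must clobber a register */
    return NULL;
  end_seq = insn->libcall_note;		/* must carry REG_LIBCALL */
  if (!end_seq || end_seq->nop_copy_reg != insn->reg)
    return NULL;			/* tail is a nop copy of R */
  if (!end_seq->has_equal_note || !end_seq->has_retval_note)
    return NULL;			/* tail has REG_EQUAL+REG_RETVAL */
  return end_seq;
}

int
main (void)
{
  struct insn tail = { 0, -1, NULL, 7, 1, 1 };
  struct insn head = { 1, 7, &tail, -1, 0, 0 };

  printf ("opens block: %s\n",
	  libcall_block_tail (&head, NULL, 0) == &tail ? "yes" : "no");
  return 0;
}
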
the call. */
bool in_post_call_group_p;
+ /* Set to the tail insn of the outermost libcall block.
+
+ When nonzero, we will mark each insn processed by sched_analyze_insn
+ with SCHED_GROUP_P to ensure libcalls are scheduled as a unit. */
+ rtx libcall_block_tail_insn;
+
/* The maximum register number for the following arrays. Before reload
this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */
int max_reg;