From: Franz Sirl Date: Mon, 19 Aug 2002 18:59:32 +0000 (+0000) Subject: [multiple changes] X-Git-Tag: releases/gcc-3.2.1~371 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=71b485f2a2d2d0c8ca718e230f8d8c91f3ce1324;p=thirdparty%2Fgcc.git [multiple changes] 2002-06-24 Jeff Law * flow.c (propagate_one_insn): When removing an insn with a REG_LIBCALL note but not the entire libcall sequence, delete the associated REG_RETVAL note. 2002-06-04 Jeff Law Michael Matz * sched-int.h (struct deps): New field libcall_block_tail_insn. * sched-deps.c (init_deps): Initialize libcall_block_tail_insn. * sched-deps.c (sched_analyze_insn): If libcall_block_tail_insn is set, then mark the current insn as being part of a libcall scheduling group. (sched_analyze): Set and clear libcall_block_tail_insn appropriately. * haifa-sched.c (schedule_block): Do not count USE or CLOBBER insns against the issue rate. 2002-05-30 Jeff Law * flow.c (propagate_one_insn): Revise yesterday's patch. Delete a dead insn with a REG_RETVAL note when the entire libcall is not dead and remove the associated REG_LIBCALL note at the same time. 2002-05-29 Jeff Law * flow.c (propagate_one_insn): Do not remove a dead insn if it contains a REG_RETVAL note. * haifa-sched.c (sched_analyze): Remove another useless clearing of SCHED_GROUP_P I missed yesterday. 2002-05-28 David Edelsohn Jeff Law * optabs.c (expand_binop): Fix nwords sign warnings. Generate pseudo for add_optab. * sched-deps.c (sched_analyze): Do not clear SCHED_GROUP_P. * haifa-sched.c (move_insn): Clear SCHED_GROUP_P after it is used. From-SVN: r56444 --- diff --git a/gcc/ChangeLog b/gcc/ChangeLog index e52d4280ecb4..46b235518596 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,46 @@ +2002-06-24 Jeff Law + + * flow.c (propagate_one_insn): When removing an insn + with a REG_LIBCALL note but not the entire libcall sequence, + delete the associated REG_RETVAL note.
+ +2002-06-04 Jeff Law + Michael Matz + + * sched-int.h (struct deps): New field libcall_block_tail_insn. + * sched-deps.c (init_deps): Initialize libcall_block_tail_insn. + * sched-deps.c (sched_analyze_insn): If libcall_block_tail_insn + is set, then mark the current insn as being part of a libcall + scheduling group. + (sched_analyze): Set and clear libcall_block_tail_insn appropriately. + + * haifa-sched.c (schedule_block): Do not count USE or CLOBBER + insns against the issue rate. + +2002-05-30 Jeff Law + + * flow.c (propagate_one_insn): Revise yesterday's patch.  Delete + a dead insn with a REG_RETVAL note when the entire libcall is not + dead and remove the associated REG_LIBCALL note at the same time. + +2002-05-29 Jeff Law + + * flow.c (propagate_one_insn): Do not remove a dead insn if it + contains a REG_RETVAL note. + + * haifa-sched.c (sched_analyze): Remove another useless clearing + of SCHED_GROUP_P I missed yesterday. + +2002-05-28 David Edelsohn + Jeff Law + + * optabs.c (expand_binop): Fix nwords sign warnings. + Generate pseudo for add_optab. + + * sched-deps.c (sched_analyze): Do not clear SCHED_GROUP_P. + * haifa-sched.c (move_insn): Clear SCHED_GROUP_P after it is used. + 2002-08-18 Neil Booth PR preprocessor/7602 diff --git a/gcc/flow.c b/gcc/flow.c index 09c1094f1269..8fabc2a97cf9 100644 --- a/gcc/flow.c +++ b/gcc/flow.c @@ -1626,7 +1626,42 @@ propagate_one_insn (pbi, insn) if (libcall_is_dead) prev = propagate_block_delete_libcall ( insn, note); else - propagate_block_delete_insn (pbi->bb, insn); + { + + /* If INSN contains a RETVAL note and is dead, but the libcall + as a whole is not dead, then we want to remove INSN, but + not the whole libcall sequence. + + However, we need to also remove the dangling REG_LIBCALL + note so that we do not have mis-matched LIBCALL/RETVAL + notes.  In theory we could find a new location for the + REG_RETVAL note, but it hardly seems worth the effort. + + NOTE at this point will be the RETVAL note if it exists.
*/ + if (note) + { + rtx libcall_note; + + libcall_note + = find_reg_note (XEXP (note, 0), REG_LIBCALL, NULL_RTX); + remove_note (XEXP (note, 0), libcall_note); + } + + /* Similarly if INSN contains a LIBCALL note, remove the + dangling REG_RETVAL note. */ + note = find_reg_note (insn, REG_LIBCALL, NULL_RTX); + if (note) + { + rtx retval_note; + + retval_note + = find_reg_note (XEXP (note, 0), REG_RETVAL, NULL_RTX); + remove_note (XEXP (note, 0), retval_note); + } + + /* Now delete INSN. */ + propagate_block_delete_insn (pbi->bb, insn); + } return prev; } diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c index 686369d418f4..1d67afb41563 100644 --- a/gcc/haifa-sched.c +++ b/gcc/haifa-sched.c @@ -1601,6 +1601,8 @@ move_insn (insn, last) retval = reemit_notes (insn, insn); else reemit_notes (insn, insn); + /* Consume SCHED_GROUP_P flag. */ + SCHED_GROUP_P (insn) = 0; insn = prev; } @@ -1764,7 +1766,10 @@ schedule_block (b, rgn_n_insns) can_issue_more = (*targetm.sched.variable_issue) (sched_dump, sched_verbose, insn, can_issue_more); - else + /* A naked CLOBBER or USE generates no instruction, so do + not count them against the issue rate. */ + else if (GET_CODE (PATTERN (insn)) != USE + && GET_CODE (PATTERN (insn)) != CLOBBER) can_issue_more--; schedule_insn (insn, &ready, clock_var); diff --git a/gcc/optabs.c b/gcc/optabs.c index 7249ffac4956..d228e58d062b 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -1192,9 +1192,9 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) { int i; optab otheroptab = binoptab == add_optab ? sub_optab : add_optab; - unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; + int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; rtx carry_in = NULL_RTX, carry_out = NULL_RTX; - rtx xop0, xop1; + rtx xop0, xop1, xtarget; /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG value is one of those, use it. 
Otherwise, use 1 since it is the @@ -1209,19 +1209,20 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) xop0 = force_reg (mode, op0); xop1 = force_reg (mode, op1); - if (target == 0 || GET_CODE (target) != REG - || target == xop0 || target == xop1) - target = gen_reg_rtx (mode); + xtarget = gen_reg_rtx (mode); + + if (target == 0 || GET_CODE (target) != REG) + target = xtarget; /* Indicate for flow that the entire target reg is being set. */ if (GET_CODE (target) == REG) - emit_insn (gen_rtx_CLOBBER (VOIDmode, target)); + emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget)); /* Do the actual arithmetic. */ for (i = 0; i < nwords; i++) { int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i); - rtx target_piece = operand_subword (target, index, 1, mode); + rtx target_piece = operand_subword (xtarget, index, 1, mode); rtx op0_piece = operand_subword_force (xop0, index, mode); rtx op1_piece = operand_subword_force (xop1, index, mode); rtx x; @@ -1281,7 +1282,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) { if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { - rtx temp = emit_move_insn (target, target); + rtx temp = emit_move_insn (target, xtarget); set_unique_reg_note (temp, REG_EQUAL, @@ -1443,6 +1444,9 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh, NULL_RTX, 0, OPTAB_DIRECT); + if (!REG_P (product_high)) + product_high = force_reg (word_mode, product_high); + if (temp != 0) temp = expand_binop (word_mode, add_optab, temp, product_high, product_high, 0, next_methods); @@ -1462,6 +1466,8 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (temp != 0 && temp != product_high) emit_move_insn (product_high, temp); + emit_move_insn (operand_subword (product, high, 1, mode), product_high); + if (temp != 0) { if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) diff --git a/gcc/sched-deps.c 
b/gcc/sched-deps.c index 5fb23b76d9b9..5904f918e46a 100644 --- a/gcc/sched-deps.c +++ b/gcc/sched-deps.c @@ -1163,6 +1163,16 @@ sched_analyze_insn (deps, x, insn, loop_notes) CLEAR_REG_SET (reg_pending_clobbers); CLEAR_REG_SET (reg_pending_sets); + /* If we are currently in a libcall scheduling group, then mark the + current insn as being in a scheduling group and that it can not + be moved into a different basic block. */ + + if (deps->libcall_block_tail_insn) + { + set_sched_group_p (insn); + CANT_MOVE (insn) = 1; + } + /* If a post-call group is still open, see if it should remain so. This insn must be a simple move of a hard reg to a pseudo or vice-versa. @@ -1226,6 +1236,8 @@ sched_analyze (deps, head, tail) for (insn = head;; insn = NEXT_INSN (insn)) { + rtx link, end_seq, r0, set, note; + if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN) { /* Clear out the stale LOG_LINKS from flow. */ @@ -1252,9 +1264,6 @@ sched_analyze (deps, head, tail) { int i; - /* Clear out stale SCHED_GROUP_P. */ - SCHED_GROUP_P (insn) = 0; - CANT_MOVE (insn) = 1; /* Clear out the stale LOG_LINKS from flow. */ @@ -1356,6 +1365,46 @@ sched_analyze (deps, head, tail) if (current_sched_info->use_cselib) cselib_process_insn (insn); + + /* Now that we have completed handling INSN, check and see if it is + a CLOBBER beginning a libcall block. If it is, record the + end of the libcall sequence. + + We want to schedule libcall blocks as a unit before reload. While + this restricts scheduling, it preserves the meaning of a libcall + block. + + As a side effect, we may get better code due to decreased register + pressure as well as less chance of a foreign insn appearing in + a libcall block. */ + if (!reload_completed + /* Note we may have nested libcall sequences. We only care about + the outermost libcall sequence. */ + && deps->libcall_block_tail_insn == 0 + /* The sequence must start with a clobber of a register. 
*/ + && GET_CODE (insn) == INSN + && GET_CODE (PATTERN (insn)) == CLOBBER + && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG) + && GET_CODE (XEXP (PATTERN (insn), 0)) == REG + /* The CLOBBER must also have a REG_LIBCALL note attached. */ + && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0 + && (end_seq = XEXP (link, 0)) != 0 + /* The insn referenced by the REG_LIBCALL note must be a + simple nop copy with the same destination as the register + mentioned in the clobber. */ + && (set = single_set (end_seq)) != 0 + && SET_DEST (set) == r0 && SET_SRC (set) == r0 + /* And finally the insn referenced by the REG_LIBCALL must + also contain a REG_EQUAL note and a REG_RETVAL note. */ + && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0 + && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0) + deps->libcall_block_tail_insn = XEXP (link, 0); + + /* If we have reached the end of a libcall block, then close the + block. */ + if (deps->libcall_block_tail_insn == insn) + deps->libcall_block_tail_insn = 0; + if (insn == tail) { if (current_sched_info->use_cselib) @@ -1449,6 +1498,7 @@ init_deps (deps) deps->last_function_call = 0; deps->sched_before_next_call = 0; deps->in_post_call_group_p = false; + deps->libcall_block_tail_insn = 0; } /* Free insn lists found in DEPS. */ diff --git a/gcc/sched-int.h b/gcc/sched-int.h index f5a880809c87..725bf2a3821a 100644 --- a/gcc/sched-int.h +++ b/gcc/sched-int.h @@ -83,6 +83,12 @@ struct deps the call. */ bool in_post_call_group_p; + /* Set to the tail insn of the outermost libcall block. + + When nonzero, we will mark each insn processed by sched_analyze_insn + with SCHED_GROUP_P to ensure libcalls are scheduled as a unit. */ + rtx libcall_block_tail_insn; + /* The maximum register number for the following arrays. Before reload this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */ int max_reg;