(x86_fast_prefix): New global variable.
(x86_arch_always_fancy_math_387): Fix formatting.
* i386.h (x86_fast_prefix): Declare.
(TARGET_FAST_PREFIX): define.
* i386.md (and to strict_low_part, HI to SI
promoting splitter): Use new macro.
* i386.h (RTX_COSTS): float_extend is not for free for SSE.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@54521
138bc75d-0d04-0410-961f-
82ee72b054a4
+Tue Jun 11 21:53:37 CEST 2002 Jan Hubicka <jh@suse.cz>
+
+ * i386.c (x86_promote_QImode): Set for Athlon
+ (x86_fast_prefix): New global variable.
+ (x86_arch_always_fancy_math_387): Fix formatting.
+ * i386.h (x86_fast_prefix): Declare.
+ (TARGET_FAST_PREFIX): define.
+ * i386.md (and to strict_low_part, HI to SI
+ promoting splitter): Use new macro.
+
+ * i386.h (RTX_COSTS): float_extend is not for free for SSE.
+
2002-06-11 Zack Weinberg <zack@codesourcery.com>
* Makefile.in (distclean): Delete junk left in testsuite
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
-const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486;
+const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON;
+const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
const int x86_single_stringop = m_386 | m_PENT4;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
const int x86_prologue_using_move = m_ATHLON | m_PENT4 | m_PPRO;
const int x86_epilogue_using_move = m_ATHLON | m_PENT4 | m_PPRO;
const int x86_decompose_lea = m_PENT4;
-const int x86_arch_always_fancy_math_387 = m_PENT|m_PPRO|m_ATHLON|m_PENT4;
+const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON | m_PENT4;
/* In case the average insn count for single function invocation is
lower than this constant, emit fast (but longer) prologue and
extern const int x86_use_loop, x86_use_fiop, x86_use_mov0;
extern const int x86_use_cltd, x86_read_modify_write;
extern const int x86_read_modify, x86_split_long_moves;
-extern const int x86_promote_QImode, x86_single_stringop;
+extern const int x86_promote_QImode, x86_single_stringop, x86_fast_prefix;
extern const int x86_himode_math, x86_qimode_math, x86_promote_qi_regs;
extern const int x86_promote_hi_regs, x86_integer_DFmode_moves;
extern const int x86_add_esp_4, x86_add_esp_8, x86_sub_esp_4, x86_sub_esp_8;
#define TARGET_READ_MODIFY_WRITE (x86_read_modify_write & CPUMASK)
#define TARGET_READ_MODIFY (x86_read_modify & CPUMASK)
#define TARGET_PROMOTE_QImode (x86_promote_QImode & CPUMASK)
+#define TARGET_FAST_PREFIX (x86_fast_prefix & CPUMASK)
#define TARGET_SINGLE_STRINGOP (x86_single_stringop & CPUMASK)
#define TARGET_QIMODE_MATH (x86_qimode_math & CPUMASK)
#define TARGET_HIMODE_MATH (x86_himode_math & CPUMASK)
TOPLEVEL_COSTS_N_INSNS (ix86_cost->add); \
\
case FLOAT_EXTEND: \
- TOPLEVEL_COSTS_N_INSNS (0); \
+ if (!TARGET_SSE_MATH \
+ || !VALID_SSE_REG_MODE (GET_MODE (X))) \
+ TOPLEVEL_COSTS_N_INSNS (0); \
+ break; \
\
egress_rtx_costs: \
break;
(and (match_dup 0)
(const_int -65536)))
(clobber (reg:CC 17))]
- "optimize_size"
+ "optimize_size || (TARGET_FAST_PREFIX && !TARGET_PARTIAL_REG_STALL)"
[(set (strict_low_part (match_dup 1)) (const_int 0))]
"operands[1] = gen_lowpart (HImode, operands[0]);")
(clobber (reg:CC 17))]
"! TARGET_PARTIAL_REG_STALL && reload_completed
&& ((GET_MODE (operands[0]) == HImode
- && (!optimize_size || GET_CODE (operands[2]) != CONST_INT
+ && ((!optimize_size && !TARGET_FAST_PREFIX)
+ || GET_CODE (operands[2]) != CONST_INT
|| CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K')))
|| (GET_MODE (operands[0]) == QImode
&& (TARGET_PROMOTE_QImode || optimize_size)))"
new = emit_insn_after (gen_rtx_SET (VOIDmode, dest, src), insn);
+ /* want_to_gcse_p verifies that this move will be valid. Still this call
+ is mandatory as it may create clobbers required by the pattern. */
+ if (insn_invalid_p (insn))
+ abort ();
+
/* Note the equivalence for local CSE pass. */
if ((note = find_reg_equal_equiv_note (insn)))
eqv = XEXP (note, 0);