+2009-12-09 Nathan Froyd <froydnj@codesourcery.com>
+
+ * config/rs6000/vector.md (absv2sf2, negv2sf2, addv2sf3, subv2sf3,
+ mulv2sf3, divv2sf3): New expanders.
+ * config/rs6000/spe.md (spe_evabs, spe_evand, spe_evaddw, spe_evsubfw,
+ spe_evdivws): Rename to use standard GCC names.
+ * config/rs6000/paired.md (negv2sf2, absv2sf2, addv2sf3, subv2sf3,
+ mulv2sf3, divv2sf3): Rename to avoid conflict with the new expanders.
+ * config/rs6000/rs6000.c (bdesc_2arg, bdesc_1arg): Use new CODE_FOR_
+ names for renamed patterns.
+
2009-12-09 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
* config/s390/s390.md ("copysign<mode>3"): Pattern removed.
(UNSPEC_EXTODD_V2SF 333)
])
-(define_insn "negv2sf2"
+(define_insn "paired_negv2sf2"
[(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
(neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))]
"TARGET_PAIRED_FLOAT"
"ps_rsqrte %0,%1"
[(set_attr "type" "fp")])
-(define_insn "absv2sf2"
+(define_insn "paired_absv2sf2"
[(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
(abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))]
"TARGET_PAIRED_FLOAT"
"ps_nabs %0,%1"
[(set_attr "type" "fp")])
-(define_insn "addv2sf3"
+(define_insn "paired_addv2sf3"
[(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
(plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
(match_operand:V2SF 2 "gpc_reg_operand" "f")))]
"ps_add %0,%1,%2"
[(set_attr "type" "fp")])
-(define_insn "subv2sf3"
+(define_insn "paired_subv2sf3"
[(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
(minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
(match_operand:V2SF 2 "gpc_reg_operand" "f")))]
"ps_sub %0,%1,%2"
[(set_attr "type" "fp")])
-(define_insn "mulv2sf3"
+(define_insn "paired_mulv2sf3"
[(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
(mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
(match_operand:V2SF 2 "gpc_reg_operand" "f")))]
"ps_res %0,%1"
[(set_attr "type" "fp")])
-(define_insn "divv2sf3"
+(define_insn "paired_divv2sf3"
[(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
(div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
(match_operand:V2SF 2 "gpc_reg_operand" "f")))]
{ MASK_VSX, CODE_FOR_nothing, "__builtin_vec_mul", VSX_BUILTIN_VEC_MUL },
{ MASK_VSX, CODE_FOR_nothing, "__builtin_vec_div", VSX_BUILTIN_VEC_DIV },
- { 0, CODE_FOR_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
- { 0, CODE_FOR_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
- { 0, CODE_FOR_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
- { 0, CODE_FOR_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
+ { 0, CODE_FOR_paired_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
+ { 0, CODE_FOR_paired_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
+ { 0, CODE_FOR_paired_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
+ { 0, CODE_FOR_paired_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
{ 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 },
{ 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 },
{ 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 },
{ 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 },
/* Place holder, leave as first spe builtin. */
- { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
- { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
+ { 0, CODE_FOR_addv2si3, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
+ { 0, CODE_FOR_andv2si3, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
{ 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
- { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
+ { 0, CODE_FOR_divv2si3, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
{ 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
{ 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
{ 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
{ 0, CODE_FOR_spe_evslw, "__builtin_spe_evslw", SPE_BUILTIN_EVSLW },
{ 0, CODE_FOR_spe_evsrws, "__builtin_spe_evsrws", SPE_BUILTIN_EVSRWS },
{ 0, CODE_FOR_spe_evsrwu, "__builtin_spe_evsrwu", SPE_BUILTIN_EVSRWU },
- { 0, CODE_FOR_spe_evsubfw, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },
+ { 0, CODE_FOR_subv2si3, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },
/* SPE binary operations expecting a 5-bit unsigned literal. */
{ 0, CODE_FOR_spe_evaddiw, "__builtin_spe_evaddiw", SPE_BUILTIN_EVADDIW },
/* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
end with SPE_BUILTIN_EVSUBFUSIAAW. */
- { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
+ { 0, CODE_FOR_absv2si2, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
{ 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
{ 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
{ 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
/* Place-holder. Leave as last unary SPE builtin. */
{ 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },
- { 0, CODE_FOR_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
+ { 0, CODE_FOR_paired_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
{ 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 },
- { 0, CODE_FOR_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
+ { 0, CODE_FOR_paired_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
{ 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 },
{ 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 }
};
;; SPE SIMD instructions
-(define_insn "spe_evabs"
+(define_insn "absv2si2"
[(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
(abs:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")))]
"TARGET_SPE"
[(set_attr "type" "vecsimple")
(set_attr "length" "4")])
-(define_insn "spe_evand"
+(define_insn "andv2si3"
[(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
(and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
(match_operand:V2SI 2 "gpc_reg_operand" "r")))]
[(set_attr "type" "veccomplex")
(set_attr "length" "4")])
-(define_insn "spe_evaddw"
+(define_insn "addv2si3"
[(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
(plus:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
(match_operand:V2SI 2 "gpc_reg_operand" "r")))]
[(set_attr "type" "veccomplex")
(set_attr "length" "4")])
-(define_insn "spe_evsubfw"
+(define_insn "subv2si3"
[(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
(minus:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
(match_operand:V2SI 2 "gpc_reg_operand" "r")))]
[(set_attr "type" "veccomplex")
(set_attr "length" "4")])
-(define_insn "spe_evdivws"
+(define_insn "divv2si3"
[(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
(div:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
(match_operand:V2SI 2 "gpc_reg_operand" "r")))
(match_operand:VEC_I 2 "vint_operand" "")))]
"TARGET_ALTIVEC"
"")
+\f
+;;; Expanders for vector insn patterns shared between the SPE and TARGET_PAIRED systems.
+
+;; Standard-named expander for vector float absolute value, available on
+;; both PAIRED and SPE.  No C body: the RTL emitted here is matched
+;; directly by the target's V2SF abs insn pattern.
+(define_expand "absv2sf2"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
+ (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))]
+ "TARGET_PAIRED_FLOAT || TARGET_SPE"
+ "")
+
+;; Standard-named expander for vector float negation, available on both
+;; PAIRED and SPE.  No C body: the RTL emitted here is matched directly
+;; by the target's V2SF neg insn pattern.
+(define_expand "negv2sf2"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
+ (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))]
+ "TARGET_PAIRED_FLOAT || TARGET_SPE"
+ "")
+
+;; Standard-named expander for V2SF addition on PAIRED and SPE.  For SPE
+;; the set must be wrapped in a PARALLEL with a clobber of SPEFSCR so the
+;; emitted insn records that the FP status/control register is modified;
+;; for PAIRED we fall through and emit the plain SET from the template.
+(define_expand "addv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
+ (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
+ (match_operand:V2SF 2 "gpc_reg_operand" "")))]
+ "TARGET_PAIRED_FLOAT || TARGET_SPE"
+ "
+{
+ if (TARGET_SPE)
+ {
+ /* We need to make a note that we clobber SPEFSCR. */
+ rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
+
+ XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_PLUS (V2SFmode, operands[1], operands[2]));
+ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
+ emit_insn (par);
+ DONE;
+ }
+}")
+
+;; Standard-named expander for V2SF subtraction on PAIRED and SPE.  As in
+;; addv2sf3, the SPE form needs a PARALLEL carrying a clobber of SPEFSCR;
+;; the PAIRED form falls through to the plain SET in the template.
+(define_expand "subv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
+ (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
+ (match_operand:V2SF 2 "gpc_reg_operand" "")))]
+ "TARGET_PAIRED_FLOAT || TARGET_SPE"
+ "
+{
+ if (TARGET_SPE)
+ {
+ /* We need to make a note that we clobber SPEFSCR. */
+ rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
+
+ XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (V2SFmode, operands[1], operands[2]));
+ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
+ emit_insn (par);
+ DONE;
+ }
+}")
+
+;; Standard-named expander for V2SF multiplication on PAIRED and SPE.
+;; SPE again requires the SET to be wrapped in a PARALLEL together with a
+;; clobber of SPEFSCR; PAIRED uses the plain SET from the template.
+(define_expand "mulv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
+ (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
+ (match_operand:V2SF 2 "gpc_reg_operand" "")))]
+ "TARGET_PAIRED_FLOAT || TARGET_SPE"
+ "
+{
+ if (TARGET_SPE)
+ {
+ /* We need to make a note that we clobber SPEFSCR. */
+ rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
+
+ XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MULT (V2SFmode, operands[1], operands[2]));
+ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
+ emit_insn (par);
+ DONE;
+ }
+}")
+
+;; Standard-named expander for V2SF division on PAIRED and SPE.  The SPE
+;; form emits a PARALLEL with a clobber of SPEFSCR, mirroring the other
+;; arithmetic expanders above; PAIRED uses the plain SET from the template.
+(define_expand "divv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
+ (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
+ (match_operand:V2SF 2 "gpc_reg_operand" "")))]
+ "TARGET_PAIRED_FLOAT || TARGET_SPE"
+ "
+{
+ if (TARGET_SPE)
+ {
+ /* We need to make a note that we clobber SPEFSCR. */
+ rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
+
+ XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_DIV (V2SFmode, operands[1], operands[2]));
+ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
+ emit_insn (par);
+ DONE;
+ }
+}")