+2014-12-10 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ Backport from mainline
+ 2014-09-02 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/rs6000-builtin.def (XVCVSXDDP_SCALE): New
+ built-in definition.
+ (XVCVUXDDP_SCALE): Likewise.
+ (XVCVDPSXDS_SCALE): Likewise.
+ (XVCVDPUXDS_SCALE): Likewise.
+ * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
+ entries for VSX_BUILTIN_XVCVSXDDP_SCALE,
+ VSX_BUILTIN_XVCVUXDDP_SCALE, VSX_BUILTIN_XVCVDPSXDS_SCALE, and
+ VSX_BUILTIN_XVCVDPUXDS_SCALE.
+ * config/rs6000/rs6000-protos.h (rs6000_scale_v2df): New
+ prototype.
+ * config/rs6000/rs6000.c (real.h): New include.
+ (rs6000_scale_v2df): New function.
+ * config/rs6000/vsx.md (UNSPEC_VSX_XVCVSXDDP): New unspec.
+ (UNSPEC_VSX_XVCVUXDDP): Likewise.
+ (UNSPEC_VSX_XVCVDPSXDS): Likewise.
+ (UNSPEC_VSX_XVCVDPUXDS): Likewise.
+ (vsx_xvcvsxddp_scale): New define_expand.
+ (vsx_xvcvsxddp): New define_insn.
+ (vsx_xvcvuxddp_scale): New define_expand.
+ (vsx_xvcvuxddp): New define_insn.
+ (vsx_xvcvdpsxds_scale): New define_expand.
+ (vsx_xvcvdpsxds): New define_insn.
+ (vsx_xvcvdpuxds_scale): New define_expand.
+ (vsx_xvcvdpuxds): New define_insn.
+ * doc/extend.texi (vec_ctf): Add new prototypes.
+ (vec_cts): Likewise.
+ (vec_ctu): Likewise.
+ (vec_splat): Likewise.
+ (vec_div): Likewise.
+ (vec_mul): Likewise.
+
+ Backport from mainline
+ 2014-08-28 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/altivec.h (vec_xl): New #define.
+ (vec_xst): Likewise.
+ * config/rs6000/rs6000-builtin.def (XXSPLTD_V2DF): New built-in.
+ (XXSPLTD_V2DI): Likewise.
+ (DIV_V2DI): Likewise.
+ (UDIV_V2DI): Likewise.
+ (MUL_V2DI): Likewise.
+ * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
+ entries for VSX_BUILTIN_XVRDPI, VSX_BUILTIN_DIV_V2DI,
+ VSX_BUILTIN_UDIV_V2DI, VSX_BUILTIN_MUL_V2DI,
+	VSX_BUILTIN_XXSPLTD_V2DF, and VSX_BUILTIN_XXSPLTD_V2DI.
+ * config/rs6000/vsx.md (UNSPEC_VSX_XXSPLTD): New unspec.
+ (UNSPEC_VSX_DIVSD): Likewise.
+ (UNSPEC_VSX_DIVUD): Likewise.
+ (UNSPEC_VSX_MULSD): Likewise.
+ (vsx_mul_v2di): New insn-and-split.
+ (vsx_div_v2di): Likewise.
+ (vsx_udiv_v2di): Likewise.
+ (vsx_xxspltd_<mode>): New insn.
+
+ Backport from mainline
+ 2014-08-20 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/altivec.h (vec_cpsgn): New #define.
+ (vec_mergee): Likewise.
+ (vec_mergeo): Likewise.
+ (vec_cntlz): Likewise.
+	* config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add new
+ entries for VEC_AND, VEC_ANDC, VEC_MERGEH, VEC_MERGEL, VEC_NOR,
+ VEC_OR, VEC_PACKSU, VEC_XOR, VEC_PERM, VEC_SEL, VEC_VCMPGT_P,
+ VMRGEW, and VMRGOW.
+ * doc/extend.texi: Document various forms of vec_cpsgn,
+ vec_splats, vec_and, vec_andc, vec_mergeh, vec_mergel, vec_nor,
+ vec_or, vec_perm, vec_sel, vec_sub, vec_xor, vec_all_eq,
+ vec_all_ge, vec_all_gt, vec_all_le, vec_all_lt, vec_all_ne,
+ vec_any_eq, vec_any_ge, vec_any_gt, vec_any_le, vec_any_lt,
+ vec_any_ne, vec_mergee, vec_mergeo, vec_packsu, and vec_cntlz.
+
+ Backport from mainline
+ 2014-07-20 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/altivec.md (unspec enum): Fix typo in UNSPEC_VSLDOI.
+ (altivec_vsldoi_<mode>): Likewise.
+
2014-12-10 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
Backport from mainline:
#define vec_vcfux __builtin_vec_vcfux
#define vec_cts __builtin_vec_cts
#define vec_ctu __builtin_vec_ctu
+#define vec_cpsgn __builtin_vec_copysign
#define vec_expte __builtin_vec_expte
#define vec_floor __builtin_vec_floor
#define vec_loge __builtin_vec_loge
#define vec_lvsl __builtin_vec_lvsl
#define vec_lvsr __builtin_vec_lvsr
#define vec_max __builtin_vec_max
+#define vec_mergee __builtin_vec_vmrgew
#define vec_mergeh __builtin_vec_mergeh
#define vec_mergel __builtin_vec_mergel
+#define vec_mergeo __builtin_vec_vmrgow
#define vec_min __builtin_vec_min
#define vec_mladd __builtin_vec_mladd
#define vec_msum __builtin_vec_msum
#define vec_sqrt __builtin_vec_sqrt
#define vec_vsx_ld __builtin_vec_vsx_ld
#define vec_vsx_st __builtin_vec_vsx_st
+#define vec_xl __builtin_vec_vsx_ld
+#define vec_xst __builtin_vec_vsx_st
/* Note, xxsldi and xxpermdi were added as __builtin_vsx_<xxx> functions
instead of __builtin_vec_<xxx> */
#define vec_vadduqm __builtin_vec_vadduqm
#define vec_vbpermq __builtin_vec_vbpermq
#define vec_vclz __builtin_vec_vclz
+#define vec_cntlz __builtin_vec_vclz
#define vec_vclzb __builtin_vec_vclzb
#define vec_vclzd __builtin_vec_vclzd
#define vec_vclzh __builtin_vec_vclzh
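
[Note: the new names added above (vec_xl, vec_xst, and likewise vec_cpsgn,
vec_mergee, vec_mergeo, vec_cntlz) are straight aliases for existing
builtins, so the old and new spellings are interchangeable.  A minimal
compile-time sketch, assuming a VSX-enabled target; values and the helper
name are illustrative, not part of the patch:

  #include <altivec.h>

  /* vec_xl/vec_xst alias __builtin_vec_vsx_ld/__builtin_vec_vsx_st,
     so each pair of calls below is equivalent.  */
  void
  copy (vector double *src, vector double *dst)
  {
    vector double v1 = vec_xl (0, src);     /* same as vec_vsx_ld  */
    vector double v2 = vec_vsx_ld (0, src);
    vec_xst (v1, 0, dst);                   /* same as vec_vsx_st  */
    vec_vsx_st (v2, 0, dst);
  }
]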
UNSPEC_VCTSXS
UNSPEC_VLOGEFP
UNSPEC_VEXPTEFP
- UNSPEC_VLSDOI
+ UNSPEC_VSLDOI
UNSPEC_VUNPACK_HI_SIGN
UNSPEC_VUNPACK_LO_SIGN
UNSPEC_VUNPACK_HI_SIGN_DIRECT
(unspec:VM [(match_operand:VM 1 "register_operand" "v")
(match_operand:VM 2 "register_operand" "v")
(match_operand:QI 3 "immediate_operand" "i")]
- UNSPEC_VLSDOI))]
+ UNSPEC_VSLDOI))]
"TARGET_ALTIVEC"
"vsldoi %0,%1,%2,%3"
[(set_attr "type" "vecperm")])
BU_VSX_2 (VEC_MERGEL_V2DI, "mergel_2di", CONST, vsx_mergel_v2di)
BU_VSX_2 (VEC_MERGEH_V2DF, "mergeh_2df", CONST, vsx_mergeh_v2df)
BU_VSX_2 (VEC_MERGEH_V2DI, "mergeh_2di", CONST, vsx_mergeh_v2di)
+BU_VSX_2 (XXSPLTD_V2DF, "xxspltd_2df", CONST, vsx_xxspltd_v2df)
+BU_VSX_2 (XXSPLTD_V2DI, "xxspltd_2di", CONST, vsx_xxspltd_v2di)
+BU_VSX_2 (DIV_V2DI, "div_2di", CONST, vsx_div_v2di)
+BU_VSX_2 (UDIV_V2DI, "udiv_2di", CONST, vsx_udiv_v2di)
+BU_VSX_2 (MUL_V2DI, "mul_2di", CONST, vsx_mul_v2di)
+
+BU_VSX_2 (XVCVSXDDP_SCALE, "xvcvsxddp_scale", CONST, vsx_xvcvsxddp_scale)
+BU_VSX_2 (XVCVUXDDP_SCALE, "xvcvuxddp_scale", CONST, vsx_xvcvuxddp_scale)
+BU_VSX_2 (XVCVDPSXDS_SCALE, "xvcvdpsxds_scale", CONST, vsx_xvcvdpsxds_scale)
+BU_VSX_2 (XVCVDPUXDS_SCALE, "xvcvdpuxds_scale", CONST, vsx_xvcvdpuxds_scale)
/* VSX abs builtin functions. */
BU_VSX_A (XVABSDP, "xvabsdp", CONST, absv2df2)
RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 },
{ ALTIVEC_BUILTIN_VEC_ROUND, ALTIVEC_BUILTIN_VRFIN,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ROUND, VSX_BUILTIN_XVRDPI,
+ RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 },
{ ALTIVEC_BUILTIN_VEC_RECIP, ALTIVEC_BUILTIN_VRECIPFP,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ ALTIVEC_BUILTIN_VEC_RECIP, VSX_BUILTIN_RECIP_V2DF,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFSX,
RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTF, VSX_BUILTIN_XVCVSXDDP_SCALE,
+ RS6000_BTI_V2DF, RS6000_BTI_V2DI, RS6000_BTI_INTSI, 0},
+ { ALTIVEC_BUILTIN_VEC_CTF, VSX_BUILTIN_XVCVUXDDP_SCALE,
+ RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, 0},
{ ALTIVEC_BUILTIN_VEC_VCFSX, ALTIVEC_BUILTIN_VCFSX,
RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_VCFUX, ALTIVEC_BUILTIN_VCFUX,
RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_CTS, ALTIVEC_BUILTIN_VCTSXS,
RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTS, VSX_BUILTIN_XVCVDPSXDS_SCALE,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_CTU, ALTIVEC_BUILTIN_VCTUXS,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTU, VSX_BUILTIN_XVCVDPUXDS_SCALE,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 },
{ VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_XVDIVSP,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_XVDIVDP,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
+ { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_DIV_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_UDIV_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DF,
RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
{ ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
{ ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI,
RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
{ ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI,
RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ VSX_BUILTIN_VEC_MUL, VSX_BUILTIN_XVMULDP,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
+ { VSX_BUILTIN_VEC_MUL, VSX_BUILTIN_MUL_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { VSX_BUILTIN_VEC_MUL, VSX_BUILTIN_MUL_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUB,
RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
{ ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESB,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_PACKSU, P8V_BUILTIN_VPKSDUS,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, P8V_BUILTIN_VPKSDUS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_VPKSWUS, ALTIVEC_BUILTIN_VPKSWUS,
RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_VPKSHUS, ALTIVEC_BUILTIN_VPKSHUS,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DF,
+ RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI,
+ RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_unsigned_V16QI },
{ ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_2DI,
RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI },
{ ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SF,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI },
{ ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI },
{ ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI,
RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DI },
{ ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI },
{ ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF,
RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
{ ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTUD_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI },
+ { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTUD_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI },
+ { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTUD_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI },
+ { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTSD_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI },
+ { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTSD_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI },
+ { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTSD_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_V2DI },
{ ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTFP_P,
RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
{ ALTIVEC_BUILTIN_VEC_VCMPGT_P, VSX_BUILTIN_XVCMPGTDP_P,
{ P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI,
RS6000_BTI_unsigned_V4SI, 0 },
+ { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
{ P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW,
RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
{ P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI,
RS6000_BTI_unsigned_V4SI, 0 },
+ { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
{ P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTB,
RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
extern void altivec_expand_stvex_be (rtx, rtx, enum machine_mode, unsigned);
extern void rs6000_expand_extract_even (rtx, rtx, rtx);
extern void rs6000_expand_interleave (rtx, rtx, rtx, bool);
+extern void rs6000_scale_v2df (rtx, rtx, int);
extern void build_mask64_2_operands (rtx, rtx *);
extern int expand_block_clear (rtx[]);
extern int expand_block_move (rtx[]);
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
+#include "real.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
+/* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
+void
+rs6000_scale_v2df (rtx tgt, rtx src, int scale)
+{
+ HOST_WIDE_INT hwi_scale (scale);
+ REAL_VALUE_TYPE r_pow;
+ rtvec v = rtvec_alloc (2);
+ rtx elt;
+ rtx scale_vec = gen_reg_rtx (V2DFmode);
+ (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
+ elt = CONST_DOUBLE_FROM_REAL_VALUE (r_pow, DFmode);
+ RTVEC_ELT (v, 0) = elt;
+ RTVEC_ELT (v, 1) = elt;
+ rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
+ emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
+}
+
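
[Note: rs6000_scale_v2df materializes a V2DF splat of 2.0**scale and emits a
vector multiply.  As a scalar model of the semantics (a sketch only, using
libm's ldexp; the helper name is hypothetical):

  #include <math.h>

  /* Scalar model of rs6000_scale_v2df: tgt[i] = src[i] * 2**scale.  */
  static void
  scale_v2df_model (double tgt[2], const double src[2], int scale)
  {
    double factor = ldexp (1.0, scale);   /* 2**scale, exact for doubles */
    tgt[0] = src[0] * factor;
    tgt[1] = src[1] * factor;
  }
]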
/* Return an RTX representing where to find the function value of a
function returning MODE. */
static rtx
UNSPEC_VSX_ROUND_IC
UNSPEC_VSX_SLDWI
UNSPEC_VSX_XXSPLTW
+ UNSPEC_VSX_XXSPLTD
+ UNSPEC_VSX_DIVSD
+ UNSPEC_VSX_DIVUD
+ UNSPEC_VSX_MULSD
+ UNSPEC_VSX_XVCVSXDDP
+ UNSPEC_VSX_XVCVUXDDP
+ UNSPEC_VSX_XVCVDPSXDS
+ UNSPEC_VSX_XVCVDPUXDS
])
;; VSX moves
[(set_attr "type" "<VStype_simple>")
(set_attr "fp_type" "<VSfptype_mul>")])
+;; Emulate vector with scalar for vec_mul in V2DImode
+(define_insn_and_split "vsx_mul_v2di"
+ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
+ (unspec:V2DI [(match_operand:V2DI 1 "vsx_register_operand" "wa")
+ (match_operand:V2DI 2 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_MULSD))]
+ "VECTOR_MEM_VSX_P (V2DImode)"
+ "#"
+ "VECTOR_MEM_VSX_P (V2DImode) && !reload_completed && !reload_in_progress"
+ [(const_int 0)]
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx op3 = gen_reg_rtx (DImode);
+ rtx op4 = gen_reg_rtx (DImode);
+ rtx op5 = gen_reg_rtx (DImode);
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (0)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (0)));
+ emit_insn (gen_muldi3 (op5, op3, op4));
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (1)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (1)));
+ emit_insn (gen_muldi3 (op3, op3, op4));
+ emit_insn (gen_vsx_concat_v2di (op0, op5, op3));
+}"
+ [(set_attr "type" "vecdouble")])
+
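
[Note: the split above emulates the doubleword multiply the vector unit
lacks: it extracts both lanes, multiplies them as 64-bit scalars via
muldi3, and recombines with vsx_concat_v2di.  A source-level model of
what it computes (illustrative; the helper name is hypothetical):

  /* Model of vsx_mul_v2di: lane-wise 64-bit multiply done with
     scalar multiplies after extracting each element.  */
  static void
  mul_v2di_model (long long r[2], const long long a[2], const long long b[2])
  {
    r[0] = a[0] * b[0];
    r[1] = a[1] * b[1];
  }
]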
(define_insn "*vsx_div<mode>3"
[(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>,?<VSa>")
(div:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>,<VSa>")
[(set_attr "type" "<VStype_div>")
(set_attr "fp_type" "<VSfptype_div>")])
+;; Emulate vector with scalar for vec_div in V2DImode
+(define_insn_and_split "vsx_div_v2di"
+ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
+ (unspec:V2DI [(match_operand:V2DI 1 "vsx_register_operand" "wa")
+ (match_operand:V2DI 2 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_DIVSD))]
+ "VECTOR_MEM_VSX_P (V2DImode)"
+ "#"
+ "VECTOR_MEM_VSX_P (V2DImode) && !reload_completed && !reload_in_progress"
+ [(const_int 0)]
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx op3 = gen_reg_rtx (DImode);
+ rtx op4 = gen_reg_rtx (DImode);
+ rtx op5 = gen_reg_rtx (DImode);
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (0)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (0)));
+ emit_insn (gen_divdi3 (op5, op3, op4));
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (1)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (1)));
+ emit_insn (gen_divdi3 (op3, op3, op4));
+ emit_insn (gen_vsx_concat_v2di (op0, op5, op3));
+}"
+ [(set_attr "type" "vecdiv")])
+
+(define_insn_and_split "vsx_udiv_v2di"
+ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
+ (unspec:V2DI [(match_operand:V2DI 1 "vsx_register_operand" "wa")
+ (match_operand:V2DI 2 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_DIVUD))]
+ "VECTOR_MEM_VSX_P (V2DImode)"
+ "#"
+ "VECTOR_MEM_VSX_P (V2DImode) && !reload_completed && !reload_in_progress"
+ [(const_int 0)]
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx op3 = gen_reg_rtx (DImode);
+ rtx op4 = gen_reg_rtx (DImode);
+ rtx op5 = gen_reg_rtx (DImode);
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (0)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (0)));
+ emit_insn (gen_udivdi3 (op5, op3, op4));
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (1)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (1)));
+ emit_insn (gen_udivdi3 (op3, op3, op4));
+ emit_insn (gen_vsx_concat_v2di (op0, op5, op3));
+}"
+ [(set_attr "type" "vecdiv")])
+
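
[Note: division is scalarized the same way, through divdi3 for the signed
split and udivdi3 for the unsigned one.  A model of the unsigned case
(illustrative; the runtime test at the end exercises both forms):

  /* Model of vsx_udiv_v2di: lane-wise 64-bit unsigned division,
     performed with scalar divides after extracting each element.  */
  static void
  udiv_v2di_model (unsigned long long r[2],
                   const unsigned long long a[2],
                   const unsigned long long b[2])
  {
    r[0] = a[0] / b[0];
    r[1] = a[1] / b[1];
  }
]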
;; *tdiv* instruction returning the FG flag
(define_expand "vsx_tdiv<mode>3_fg"
[(set (match_dup 3)
"xscvspdpn %x0,%x1"
[(set_attr "type" "fp")])
+;; Convert and scale (used by vec_ctf, vec_cts, vec_ctu for double/long long)
+
+(define_expand "vsx_xvcvsxddp_scale"
+ [(match_operand:V2DF 0 "vsx_register_operand" "")
+ (match_operand:V2DI 1 "vsx_register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")]
+ "VECTOR_UNIT_VSX_P (V2DFmode)"
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+  int scale = INTVAL (operands[2]);
+ emit_insn (gen_vsx_xvcvsxddp (op0, op1));
+ if (scale != 0)
+ rs6000_scale_v2df (op0, op0, -scale);
+ DONE;
+})
+
+(define_insn "vsx_xvcvsxddp"
+ [(set (match_operand:V2DF 0 "vsx_register_operand" "=wa")
+ (unspec:V2DF [(match_operand:V2DI 1 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_XVCVSXDDP))]
+ "VECTOR_UNIT_VSX_P (V2DFmode)"
+ "xvcvsxddp %x0,%x1"
+ [(set_attr "type" "vecdouble")])
+
+(define_expand "vsx_xvcvuxddp_scale"
+ [(match_operand:V2DF 0 "vsx_register_operand" "")
+ (match_operand:V2DI 1 "vsx_register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")]
+ "VECTOR_UNIT_VSX_P (V2DFmode)"
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+  int scale = INTVAL (operands[2]);
+ emit_insn (gen_vsx_xvcvuxddp (op0, op1));
+ if (scale != 0)
+ rs6000_scale_v2df (op0, op0, -scale);
+ DONE;
+})
+
+(define_insn "vsx_xvcvuxddp"
+ [(set (match_operand:V2DF 0 "vsx_register_operand" "=wa")
+ (unspec:V2DF [(match_operand:V2DI 1 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_XVCVUXDDP))]
+ "VECTOR_UNIT_VSX_P (V2DFmode)"
+ "xvcvuxddp %x0,%x1"
+ [(set_attr "type" "vecdouble")])
+
+(define_expand "vsx_xvcvdpsxds_scale"
+ [(match_operand:V2DI 0 "vsx_register_operand" "")
+ (match_operand:V2DF 1 "vsx_register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")]
+ "VECTOR_UNIT_VSX_P (V2DFmode)"
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+  rtx tmp;
+  int scale = INTVAL (operands[2]);
+  if (scale == 0)
+    tmp = op1;
+  else
+    {
+      tmp = gen_reg_rtx (V2DFmode);
+      rs6000_scale_v2df (tmp, op1, scale);
+    }
+  emit_insn (gen_vsx_xvcvdpsxds (op0, tmp));
+ DONE;
+})
+
+(define_insn "vsx_xvcvdpsxds"
+ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
+ (unspec:V2DI [(match_operand:V2DF 1 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_XVCVDPSXDS))]
+ "VECTOR_UNIT_VSX_P (V2DFmode)"
+ "xvcvdpsxds %x0,%x1"
+ [(set_attr "type" "vecdouble")])
+
+(define_expand "vsx_xvcvdpuxds_scale"
+ [(match_operand:V2DI 0 "vsx_register_operand" "")
+ (match_operand:V2DF 1 "vsx_register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")]
+ "VECTOR_UNIT_VSX_P (V2DFmode)"
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+  rtx tmp;
+  int scale = INTVAL (operands[2]);
+  if (scale == 0)
+    tmp = op1;
+  else
+    {
+      tmp = gen_reg_rtx (V2DFmode);
+      rs6000_scale_v2df (tmp, op1, scale);
+    }
+  emit_insn (gen_vsx_xvcvdpuxds (op0, tmp));
+ DONE;
+})
+
+(define_insn "vsx_xvcvdpuxds"
+ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
+ (unspec:V2DI [(match_operand:V2DF 1 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_XVCVDPUXDS))]
+ "VECTOR_UNIT_VSX_P (V2DFmode)"
+ "xvcvdpuxds %x0,%x1"
+ [(set_attr "type" "vecdouble")])
+
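
[Note: together these expands give vec_ctf/vec_cts/vec_ctu their scaling
behavior for doubleword types: ctf converts, then divides by 2**scale;
cts/ctu scale by 2**scale before the truncating convert.  A scalar model,
illustrative only (note scale may be negative, as in the tests below):

  #include <math.h>

  /* Model of vec_ctf on one element: convert, then * 2**-scale.  */
  static double
  ctf_model (long long x, int scale)
  {
    return (double) x / ldexp (1.0, scale);
  }

  /* Model of vec_cts on one element: * 2**scale, then truncate.  */
  static long long
  cts_model (double x, int scale)
  {
    return (long long) (x * ldexp (1.0, scale));
  }
]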
;; Convert from 64-bit to 32-bit types
;; Note, favor the Altivec registers since the usual use of these instructions
;; is in vector converts and we need to use the Altivec vperm instruction.
"xxspltw %x0,%x1,%2"
[(set_attr "type" "vecperm")])
+;; V2DF/V2DI splat for use by vec_splat builtin
+(define_insn "vsx_xxspltd_<mode>"
+ [(set (match_operand:VSX_D 0 "vsx_register_operand" "=wa")
+ (unspec:VSX_D [(match_operand:VSX_D 1 "vsx_register_operand" "wa")
+ (match_operand:QI 2 "u5bit_cint_operand" "i")]
+ UNSPEC_VSX_XXSPLTD))]
+ "VECTOR_MEM_VSX_P (<MODE>mode)"
+{
+ if ((VECTOR_ELT_ORDER_BIG && INTVAL (operands[2]) == 0)
+ || (!VECTOR_ELT_ORDER_BIG && INTVAL (operands[2]) == 1))
+ return "xxpermdi %x0,%x1,%x1,0";
+ else
+ return "xxpermdi %x0,%x1,%x1,3";
+}
+ [(set_attr "type" "vecperm")])
+
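
[Note: the splat pattern replicates one doubleword into both lanes, with
the xxpermdi immediate chosen so that the element number follows the
vector element order on both endiannesses.  In source terms (a sketch;
the helper name is hypothetical):

  /* Model of vsx_xxspltd_<mode> on V2DI, elt in {0, 1}:
     r[0] = r[1] = v[elt].  */
  static void
  xxspltd_model (long long r[2], const long long v[2], int elt)
  {
    r[0] = v[elt];
    r[1] = v[elt];
  }
]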
;; V4SF/V4SI interleave
(define_insn "vsx_xxmrghw_<mode>"
[(set (match_operand:VSX_W 0 "vsx_register_operand" "=wf,?<VSa>")
vector bool int vec_cmplt (vector signed int, vector signed int);
vector bool int vec_cmplt (vector float, vector float);
+vector float vec_cpsgn (vector float, vector float);
+
vector float vec_ctf (vector unsigned int, const int);
vector float vec_ctf (vector signed int, const int);
+vector double vec_ctf (vector unsigned long, const int);
+vector double vec_ctf (vector signed long, const int);
vector float vec_vcfsx (vector signed int, const int);
vector float vec_vcfux (vector unsigned int, const int);
vector signed int vec_cts (vector float, const int);
+vector signed long vec_cts (vector double, const int);
vector unsigned int vec_ctu (vector float, const int);
+vector unsigned long vec_ctu (vector double, const int);
void vec_dss (const int);
vector signed int vec_splat (vector signed int, const int);
vector unsigned int vec_splat (vector unsigned int, const int);
vector bool int vec_splat (vector bool int, const int);
+vector signed long vec_splat (vector signed long, const int);
+vector unsigned long vec_splat (vector unsigned long, const int);
+
+vector signed char vec_splats (signed char);
+vector unsigned char vec_splats (unsigned char);
+vector signed short vec_splats (signed short);
+vector unsigned short vec_splats (unsigned short);
+vector signed int vec_splats (signed int);
+vector unsigned int vec_splats (unsigned int);
+vector float vec_splats (float);
vector float vec_vspltw (vector float, const int);
vector signed int vec_vspltw (vector signed int, const int);
vector double vec_and (vector double, vector double);
vector double vec_and (vector double, vector bool long);
vector double vec_and (vector bool long, vector double);
+vector long vec_and (vector long, vector long);
+vector long vec_and (vector long, vector bool long);
+vector long vec_and (vector bool long, vector long);
+vector unsigned long vec_and (vector unsigned long, vector unsigned long);
+vector unsigned long vec_and (vector unsigned long, vector bool long);
+vector unsigned long vec_and (vector bool long, vector unsigned long);
vector double vec_andc (vector double, vector double);
vector double vec_andc (vector double, vector bool long);
vector double vec_andc (vector bool long, vector double);
+vector long vec_andc (vector long, vector long);
+vector long vec_andc (vector long, vector bool long);
+vector long vec_andc (vector bool long, vector long);
+vector unsigned long vec_andc (vector unsigned long, vector unsigned long);
+vector unsigned long vec_andc (vector unsigned long, vector bool long);
+vector unsigned long vec_andc (vector bool long, vector unsigned long);
vector double vec_ceil (vector double);
vector bool long vec_cmpeq (vector double, vector double);
vector bool long vec_cmpge (vector double, vector double);
vector bool long vec_cmpgt (vector double, vector double);
vector bool long vec_cmple (vector double, vector double);
vector bool long vec_cmplt (vector double, vector double);
+vector double vec_cpsgn (vector double, vector double);
vector float vec_div (vector float, vector float);
vector double vec_div (vector double, vector double);
+vector long vec_div (vector long, vector long);
+vector unsigned long vec_div (vector unsigned long, vector unsigned long);
vector double vec_floor (vector double);
vector double vec_ld (int, const vector double *);
vector double vec_ld (int, const double *);
vector unsigned char vec_lvsr (int, const volatile double *);
vector double vec_madd (vector double, vector double, vector double);
vector double vec_max (vector double, vector double);
+vector signed long vec_mergeh (vector signed long, vector signed long);
+vector signed long vec_mergeh (vector signed long, vector bool long);
+vector signed long vec_mergeh (vector bool long, vector signed long);
+vector unsigned long vec_mergeh (vector unsigned long, vector unsigned long);
+vector unsigned long vec_mergeh (vector unsigned long, vector bool long);
+vector unsigned long vec_mergeh (vector bool long, vector unsigned long);
+vector signed long vec_mergel (vector signed long, vector signed long);
+vector signed long vec_mergel (vector signed long, vector bool long);
+vector signed long vec_mergel (vector bool long, vector signed long);
+vector unsigned long vec_mergel (vector unsigned long, vector unsigned long);
+vector unsigned long vec_mergel (vector unsigned long, vector bool long);
+vector unsigned long vec_mergel (vector bool long, vector unsigned long);
vector double vec_min (vector double, vector double);
vector float vec_msub (vector float, vector float, vector float);
vector double vec_msub (vector double, vector double, vector double);
vector float vec_mul (vector float, vector float);
vector double vec_mul (vector double, vector double);
+vector long vec_mul (vector long, vector long);
+vector unsigned long vec_mul (vector unsigned long, vector unsigned long);
vector float vec_nearbyint (vector float);
vector double vec_nearbyint (vector double);
vector float vec_nmadd (vector float, vector float, vector float);
vector double vec_nmadd (vector double, vector double, vector double);
vector double vec_nmsub (vector double, vector double, vector double);
vector double vec_nor (vector double, vector double);
+vector long vec_nor (vector long, vector long);
+vector long vec_nor (vector long, vector bool long);
+vector long vec_nor (vector bool long, vector long);
+vector unsigned long vec_nor (vector unsigned long, vector unsigned long);
+vector unsigned long vec_nor (vector unsigned long, vector bool long);
+vector unsigned long vec_nor (vector bool long, vector unsigned long);
vector double vec_or (vector double, vector double);
vector double vec_or (vector double, vector bool long);
vector double vec_or (vector bool long, vector double);
-vector double vec_perm (vector double,
- vector double,
- vector unsigned char);
+vector long vec_or (vector long, vector long);
+vector long vec_or (vector long, vector bool long);
+vector long vec_or (vector bool long, vector long);
+vector unsigned long vec_or (vector unsigned long, vector unsigned long);
+vector unsigned long vec_or (vector unsigned long, vector bool long);
+vector unsigned long vec_or (vector bool long, vector unsigned long);
+vector double vec_perm (vector double, vector double, vector unsigned char);
+vector long vec_perm (vector long, vector long, vector unsigned char);
+vector unsigned long vec_perm (vector unsigned long, vector unsigned long,
+ vector unsigned char);
vector double vec_rint (vector double);
vector double vec_recip (vector double, vector double);
vector double vec_rsqrt (vector double);
vector double vec_rsqrte (vector double);
vector double vec_sel (vector double, vector double, vector bool long);
vector double vec_sel (vector double, vector double, vector unsigned long);
-vector double vec_sub (vector double, vector double);
+vector long vec_sel (vector long, vector long, vector long);
+vector long vec_sel (vector long, vector long, vector unsigned long);
+vector long vec_sel (vector long, vector long, vector bool long);
+vector unsigned long vec_sel (vector unsigned long, vector unsigned long,
+ vector long);
+vector unsigned long vec_sel (vector unsigned long, vector unsigned long,
+ vector unsigned long);
+vector unsigned long vec_sel (vector unsigned long, vector unsigned long,
+ vector bool long);
+vector double vec_splats (double);
+vector signed long vec_splats (signed long);
+vector unsigned long vec_splats (unsigned long);
vector float vec_sqrt (vector float);
vector double vec_sqrt (vector double);
void vec_st (vector double, int, vector double *);
void vec_st (vector double, int, double *);
+vector double vec_sub (vector double, vector double);
vector double vec_trunc (vector double);
vector double vec_xor (vector double, vector double);
vector double vec_xor (vector double, vector bool long);
vector double vec_xor (vector bool long, vector double);
+vector long vec_xor (vector long, vector long);
+vector long vec_xor (vector long, vector bool long);
+vector long vec_xor (vector bool long, vector long);
+vector unsigned long vec_xor (vector unsigned long, vector unsigned long);
+vector unsigned long vec_xor (vector unsigned long, vector bool long);
+vector unsigned long vec_xor (vector bool long, vector unsigned long);
int vec_all_eq (vector double, vector double);
int vec_all_ge (vector double, vector double);
int vec_all_gt (vector double, vector double);
vector unsigned long long);
int vec_all_eq (vector long long, vector long long);
+int vec_all_eq (vector unsigned long long, vector unsigned long long);
int vec_all_ge (vector long long, vector long long);
+int vec_all_ge (vector unsigned long long, vector unsigned long long);
int vec_all_gt (vector long long, vector long long);
+int vec_all_gt (vector unsigned long long, vector unsigned long long);
int vec_all_le (vector long long, vector long long);
+int vec_all_le (vector unsigned long long, vector unsigned long long);
int vec_all_lt (vector long long, vector long long);
+int vec_all_lt (vector unsigned long long, vector unsigned long long);
int vec_all_ne (vector long long, vector long long);
+int vec_all_ne (vector unsigned long long, vector unsigned long long);
+
int vec_any_eq (vector long long, vector long long);
+int vec_any_eq (vector unsigned long long, vector unsigned long long);
int vec_any_ge (vector long long, vector long long);
+int vec_any_ge (vector unsigned long long, vector unsigned long long);
int vec_any_gt (vector long long, vector long long);
+int vec_any_gt (vector unsigned long long, vector unsigned long long);
int vec_any_le (vector long long, vector long long);
+int vec_any_le (vector unsigned long long, vector unsigned long long);
int vec_any_lt (vector long long, vector long long);
+int vec_any_lt (vector unsigned long long, vector unsigned long long);
int vec_any_ne (vector long long, vector long long);
+int vec_any_ne (vector unsigned long long, vector unsigned long long);
vector long long vec_eqv (vector long long, vector long long);
vector long long vec_eqv (vector bool long long, vector long long);
vector unsigned long long vec_max (vector unsigned long long,
vector unsigned long long);
+vector signed int vec_mergee (vector signed int, vector signed int);
+vector unsigned int vec_mergee (vector unsigned int, vector unsigned int);
+vector bool int vec_mergee (vector bool int, vector bool int);
+
+vector signed int vec_mergeo (vector signed int, vector signed int);
+vector unsigned int vec_mergeo (vector unsigned int, vector unsigned int);
+vector bool int vec_mergeo (vector bool int, vector bool int);
+
vector long long vec_min (vector long long, vector long long);
vector unsigned long long vec_min (vector unsigned long long,
vector unsigned long long);
vector unsigned long long);
vector unsigned int vec_packsu (vector long long, vector long long);
+vector unsigned int vec_packsu (vector unsigned long long,
+ vector unsigned long long);
vector long long vec_rl (vector long long,
vector unsigned long long);
vector long long vec_vbpermq (vector signed char, vector signed char);
vector long long vec_vbpermq (vector unsigned char, vector unsigned char);
+vector long long vec_cntlz (vector long long);
+vector unsigned long long vec_cntlz (vector unsigned long long);
+vector int vec_cntlz (vector int);
+vector unsigned int vec_cntlz (vector unsigned int);
+vector short vec_cntlz (vector short);
+vector unsigned short vec_cntlz (vector unsigned short);
+vector signed char vec_cntlz (vector signed char);
+vector unsigned char vec_cntlz (vector unsigned char);
+
vector long long vec_vclz (vector long long);
vector unsigned long long vec_vclz (vector unsigned long long);
vector int vec_vclz (vector int);
+2014-12-10 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ Backport from mainline
+ 2014-09-02 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/builtins-1.c: Add tests for vec_ctf,
+ vec_cts, and vec_ctu.
+ * gcc.target/powerpc/builtins-2.c: Likewise.
+
+ Backport from mainline
+ 2014-08-28 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/builtins-1.c: Add tests for vec_xl, vec_xst,
+ vec_round, vec_splat, vec_div, and vec_mul.
+ * gcc.target/powerpc/builtins-2.c: New test.
+
+ Backport from mainline
+ 2014-08-20 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+	* gcc.target/powerpc/builtins-1.c: New test.
+
2014-12-09 Uros Bizjak <ubizjak@gmail.com>
PR bootstrap/64213
--- /dev/null
+/* { dg-do compile { target { powerpc64le-*-* } } } */
+/* { dg-options "-mcpu=power8 -O0" } */
+
+/* Test that a number of newly added builtin overloads are accepted
+ by the compiler. */
+
+#include <altivec.h>
+
+vector double y = { 2.0, 4.0 };
+vector double z;
+
+int main ()
+{
+ vector float fa = {1.0, 2.0, 3.0, -4.0};
+ vector float fb = {-2.0, -3.0, -4.0, -5.0};
+ vector float fc = vec_cpsgn (fa, fb);
+
+ vector long long la = {5L, 14L};
+ vector long long lb = {3L, 86L};
+ vector long long lc = vec_and (la, lb);
+ vector bool long long ld = {0, -1};
+ vector long long le = vec_and (la, ld);
+ vector long long lf = vec_and (ld, lb);
+
+ vector unsigned long long ua = {5L, 14L};
+ vector unsigned long long ub = {3L, 86L};
+ vector unsigned long long uc = vec_and (ua, ub);
+ vector bool long long ud = {0, -1};
+ vector unsigned long long ue = vec_and (ua, ud);
+ vector unsigned long long uf = vec_and (ud, ub);
+
+ vector long long lg = vec_andc (la, lb);
+ vector long long lh = vec_andc (la, ld);
+ vector long long li = vec_andc (ld, lb);
+
+ vector unsigned long long ug = vec_andc (ua, ub);
+ vector unsigned long long uh = vec_andc (ua, ud);
+ vector unsigned long long ui = vec_andc (ud, ub);
+
+ vector double da = {1.0, -4.0};
+ vector double db = {-2.0, 5.0};
+ vector double dc = vec_cpsgn (da, db);
+
+ vector long long lj = vec_mergeh (la, lb);
+ vector long long lk = vec_mergeh (la, ld);
+ vector long long ll = vec_mergeh (ld, la);
+
+ vector unsigned long long uj = vec_mergeh (ua, ub);
+ vector unsigned long long uk = vec_mergeh (ua, ud);
+ vector unsigned long long ul = vec_mergeh (ud, ua);
+
+ vector long long lm = vec_mergel (la, lb);
+ vector long long ln = vec_mergel (la, ld);
+ vector long long lo = vec_mergel (ld, la);
+
+ vector unsigned long long um = vec_mergel (ua, ub);
+ vector unsigned long long un = vec_mergel (ua, ud);
+ vector unsigned long long uo = vec_mergel (ud, ua);
+
+ vector long long lp = vec_nor (la, lb);
+ vector long long lq = vec_nor (la, ld);
+ vector long long lr = vec_nor (ld, la);
+
+ vector unsigned long long up = vec_nor (ua, ub);
+ vector unsigned long long uq = vec_nor (ua, ud);
+ vector unsigned long long ur = vec_nor (ud, ua);
+
+ vector long long ls = vec_or (la, lb);
+ vector long long lt = vec_or (la, ld);
+ vector long long lu = vec_or (ld, la);
+
+ vector unsigned long long us = vec_or (ua, ub);
+ vector unsigned long long ut = vec_or (ua, ud);
+ vector unsigned long long uu = vec_or (ud, ua);
+
+ vector unsigned char ca = {0,4,8,1,5,9,2,6,10,3,7,11,15,12,14,13};
+ vector long long lv = vec_perm (la, lb, ca);
+ vector unsigned long long uv = vec_perm (ua, ub, ca);
+
+ vector long long lw = vec_sel (la, lb, lc);
+ vector long long lx = vec_sel (la, lb, uc);
+ vector long long ly = vec_sel (la, lb, ld);
+
+ vector unsigned long long uw = vec_sel (ua, ub, lc);
+ vector unsigned long long ux = vec_sel (ua, ub, uc);
+ vector unsigned long long uy = vec_sel (ua, ub, ld);
+
+ vector long long lz = vec_xor (la, lb);
+ vector long long l0 = vec_xor (la, ld);
+ vector long long l1 = vec_xor (ld, la);
+
+ vector unsigned long long uz = vec_xor (ua, ub);
+ vector unsigned long long u0 = vec_xor (ua, ud);
+ vector unsigned long long u1 = vec_xor (ud, ua);
+
+ int ia = vec_all_eq (ua, ub);
+ int ib = vec_all_ge (ua, ub);
+ int ic = vec_all_gt (ua, ub);
+ int id = vec_all_le (ua, ub);
+ int ie = vec_all_lt (ua, ub);
+ int ig = vec_all_ne (ua, ub);
+
+ int ih = vec_any_eq (ua, ub);
+ int ii = vec_any_ge (ua, ub);
+ int ij = vec_any_gt (ua, ub);
+ int ik = vec_any_le (ua, ub);
+ int il = vec_any_lt (ua, ub);
+ int im = vec_any_ne (ua, ub);
+
+ vector int sia = {9, 16, 25, 36};
+ vector int sib = {-8, -27, -64, -125};
+ vector int sic = vec_mergee (sia, sib);
+ vector int sid = vec_mergeo (sia, sib);
+
+ vector unsigned int uia = {9, 16, 25, 36};
+ vector unsigned int uib = {8, 27, 64, 125};
+ vector unsigned int uic = vec_mergee (uia, uib);
+ vector unsigned int uid = vec_mergeo (uia, uib);
+
+ vector bool int bia = {0, -1, -1, 0};
+ vector bool int bib = {-1, -1, 0, -1};
+ vector bool int bic = vec_mergee (bia, bib);
+ vector bool int bid = vec_mergeo (bia, bib);
+
+ vector unsigned int uie = vec_packsu (ua, ub);
+
+ vector long long l2 = vec_cntlz (la);
+ vector unsigned long long u2 = vec_cntlz (ua);
+ vector int sie = vec_cntlz (sia);
+ vector unsigned int uif = vec_cntlz (uia);
+ vector short ssa = {20, -40, -60, 80, 100, -120, -140, 160};
+ vector short ssb = vec_cntlz (ssa);
+ vector unsigned short usa = {81, 72, 63, 54, 45, 36, 27, 18};
+ vector unsigned short usb = vec_cntlz (usa);
+ vector signed char sca = {-4, 3, -9, 15, -31, 31, 0, 0,
+ 1, 117, -36, 99, 98, 97, 96, 95};
+ vector signed char scb = vec_cntlz (sca);
+ vector unsigned char cb = vec_cntlz (ca);
+
+ vector double dd = vec_xl (0, &y);
+ vec_xst (dd, 0, &z);
+
+ vector double de = vec_round (dd);
+
+ vector double df = vec_splat (de, 0);
+ vector double dg = vec_splat (de, 1);
+ vector long long l3 = vec_splat (l2, 0);
+ vector long long l4 = vec_splat (l2, 1);
+ vector unsigned long long u3 = vec_splat (u2, 0);
+ vector unsigned long long u4 = vec_splat (u2, 1);
+ vector bool long long l5 = vec_splat (ld, 0);
+ vector bool long long l6 = vec_splat (ld, 1);
+
+ vector long long l7 = vec_div (l3, l4);
+ vector unsigned long long u5 = vec_div (u3, u4);
+
+ vector long long l8 = vec_mul (l3, l4);
+ vector unsigned long long u6 = vec_mul (u3, u4);
+
+ vector double dh = vec_ctf (la, -2);
+ vector double di = vec_ctf (ua, 2);
+ vector long long l9 = vec_cts (dh, -2);
+ vector unsigned long long u7 = vec_ctu (di, 2);
+
+ return 0;
+}
--- /dev/null
+/* { dg-do run { target { powerpc64le-*-* } } } */
+/* { dg-options "-mcpu=power8" } */
+
+#include <altivec.h>
+
+void abort (void);
+
+int main ()
+{
+ vector long long sa = {27L, -14L};
+ vector long long sb = {-9L, -2L};
+
+ vector unsigned long long ua = {27L, 14L};
+ vector unsigned long long ub = {9L, 2L};
+
+ vector long long sc = vec_div (sa, sb);
+ vector unsigned long long uc = vec_div (ua, ub);
+
+ if (sc[0] != -3L || sc[1] != 7L || uc[0] != 3L || uc[1] != 7L)
+ abort ();
+
+ vector long long sd = vec_mul (sa, sb);
+ vector unsigned long long ud = vec_mul (ua, ub);
+
+ if (sd[0] != -243L || sd[1] != 28L || ud[0] != 243L || ud[1] != 28L)
+ abort ();
+
+ vector long long se = vec_splat (sa, 0);
+ vector long long sf = vec_splat (sa, 1);
+ vector unsigned long long ue = vec_splat (ua, 0);
+ vector unsigned long long uf = vec_splat (ua, 1);
+
+ if (se[0] != 27L || se[1] != 27L || sf[0] != -14L || sf[1] != -14L
+ || ue[0] != 27L || ue[1] != 27L || uf[0] != 14L || uf[1] != 14L)
+ abort ();
+
+ vector double da = vec_ctf (sa, -2);
+ vector double db = vec_ctf (ua, 2);
+ vector long long sg = vec_cts (da, -2);
+ vector unsigned long long ug = vec_ctu (db, 2);
+
+ if (da[0] != 108.0 || da[1] != -56.0 || db[0] != 6.75 || db[1] != 3.5
+ || sg[0] != 27L || sg[1] != -14L || ug[0] != 27L || ug[1] != 14L)
+ abort ();
+
+ return 0;
+}