altivec.md (altivec_lvxl): Rename as *altivec_lvxl_<mode>_internal and use VM2 iterator instead of V4SI.
author    William Schmidt <wschmidt@gcc.gnu.org>
          Fri, 21 Feb 2014 20:46:52 +0000 (20:46 +0000)
committer William Schmidt <wschmidt@gcc.gnu.org>
          Fri, 21 Feb 2014 20:46:52 +0000 (20:46 +0000)
gcc:

2014-02-21  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

* config/rs6000/altivec.md (altivec_lvxl): Rename as
*altivec_lvxl_<mode>_internal and use VM2 iterator instead of
V4SI.
(altivec_lvxl_<mode>): New define_expand incorporating
-maltivec=be semantics where needed.
(altivec_lvx): Rename as *altivec_lvx_<mode>_internal.
(altivec_lvx_<mode>): New define_expand incorporating -maltivec=be
semantics where needed.
(altivec_stvx): Rename as *altivec_stvx_<mode>_internal.
(altivec_stvx_<mode>): New define_expand incorporating
-maltivec=be semantics where needed.
(altivec_stvxl): Rename as *altivec_stvxl_<mode>_internal and use
VM2 iterator instead of V4SI.
(altivec_stvxl_<mode>): New define_expand incorporating
-maltivec=be semantics where needed.
* config/rs6000/rs6000-builtin.def: Add new built-in definitions
LVXL_V2DF, LVXL_V2DI, LVXL_V4SF, LVXL_V4SI, LVXL_V8HI, LVXL_V16QI,
LVX_V2DF, LVX_V2DI, LVX_V4SF, LVX_V4SI, LVX_V8HI, LVX_V16QI,
STVX_V2DF, STVX_V2DI, STVX_V4SF, STVX_V4SI, STVX_V8HI, STVX_V16QI,
STVXL_V2DF, STVXL_V2DI, STVXL_V4SF, STVXL_V4SI, STVXL_V8HI,
STVXL_V16QI.
* config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Replace
ALTIVEC_BUILTIN_LVX with ALTIVEC_BUILTIN_LVX_<MODE> throughout;
similarly for ALTIVEC_BUILTIN_LVXL, ALTIVEC_BUILTIN_STVX, and
ALTIVEC_BUILTIN_STVXL.
* config/rs6000/rs6000-protos.h (altivec_expand_lvx_be): New
prototype.
(altivec_expand_stvx_be): Likewise.
* config/rs6000/rs6000.c (swap_selector_for_mode): New function.
(altivec_expand_lvx_be): Likewise.
(altivec_expand_stvx_be): Likewise.
(altivec_expand_builtin): Add cases for
ALTIVEC_BUILTIN_STVX_<MODE>, ALTIVEC_BUILTIN_STVXL_<MODE>,
ALTIVEC_BUILTIN_LVXL_<MODE>, and ALTIVEC_BUILTIN_LVX_<MODE>.
(altivec_init_builtins): Add definitions for
__builtin_altivec_lvxl_<mode>, __builtin_altivec_lvx_<mode>,
__builtin_altivec_stvx_<mode>, and
__builtin_altivec_stvxl_<mode>.

gcc/testsuite:

2014-02-21  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

* gcc.dg/vmx/ld.c: New test.
* gcc.dg/vmx/ld-be-order.c: New test.
* gcc.dg/vmx/ld-vsx.c: New test.
* gcc.dg/vmx/ld-vsx-be-order.c: New test.
* gcc.dg/vmx/ldl.c: New test.
* gcc.dg/vmx/ldl-be-order.c: New test.
* gcc.dg/vmx/ldl-vsx.c: New test.
* gcc.dg/vmx/ldl-vsx-be-order.c: New test.
* gcc.dg/vmx/st.c: New test.
* gcc.dg/vmx/st-be-order.c: New test.
* gcc.dg/vmx/st-vsx.c: New test.
* gcc.dg/vmx/st-vsx-be-order.c: New test.
* gcc.dg/vmx/stl.c: New test.
* gcc.dg/vmx/stl-be-order.c: New test.
* gcc.dg/vmx/stl-vsx.c: New test.
* gcc.dg/vmx/stl-vsx-be-order.c: New test.

From-SVN: r208019
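
Illustrative sketch (not part of the commit): the -maltivec=be semantics referred to throughout the ChangeLog above mean that vec_ld/vec_st present elements in big-endian order even on a little-endian target, which the new expanders achieve by pairing the lvx/stvx with an element-reversing vperm. A minimal example in C, mirroring the new ld-be-order.c test added below; all names here are illustrative:

  /* Compile with -maltivec=be -mabi=altivec on a little-endian
     PowerPC target.  */
  #include <altivec.h>

  static unsigned int sv[4] __attribute__ ((aligned (16))) = {0, 1, 2, 3};

  vector unsigned int
  load_be_order (void)
  {
    /* With -maltivec=be on little endian, the result compares equal to
       the native vector literal {3,2,1,0}; without the option it is
       {0,1,2,3}.  */
    return vec_ld (0, (vector unsigned int *) sv);
  }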

21 files changed:
gcc/config/rs6000/altivec.md
gcc/config/rs6000/rs6000-builtin.def
gcc/config/rs6000/rs6000-c.c
gcc/config/rs6000/rs6000-protos.h
gcc/config/rs6000/rs6000.c
gcc/testsuite/gcc.dg/vmx/ld-be-order.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/ld-vsx-be-order.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/ld-vsx.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/ld.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/ldl-be-order.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/ldl-vsx-be-order.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/ldl-vsx.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/ldl.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/st-be-order.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/st-vsx-be-order.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/st-vsx.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/st.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/stl-be-order.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/stl-vsx-be-order.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/stl-vsx.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/vmx/stl.c [new file with mode: 0644]

index afe621c5e54296eca9214f45a2ba8903afe21822..115354a2d50a6167db6ff1414823a3ccf8a8030b 100644 (file)
   "lvewx %0,%y1"
   [(set_attr "type" "vecload")])
 
-(define_insn "altivec_lvxl"
+(define_expand "altivec_lvxl_<mode>"
   [(parallel
-    [(set (match_operand:V4SI 0 "register_operand" "=v")
-         (match_operand:V4SI 1 "memory_operand" "Z"))
+    [(set (match_operand:VM2 0 "register_operand" "=v")
+         (match_operand:VM2 1 "memory_operand" "Z"))
+     (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
+  "TARGET_ALTIVEC"
+{
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    {
+      altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_SET_VSCR);
+      DONE;
+    }
+})
+
+(define_insn "*altivec_lvxl_<mode>_internal"
+  [(parallel
+    [(set (match_operand:VM2 0 "register_operand" "=v")
+         (match_operand:VM2 1 "memory_operand" "Z"))
      (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
   "TARGET_ALTIVEC"
-  "lvxl %0,%y1"
+  "lvx %0,%y1"
   [(set_attr "type" "vecload")])
 
-(define_insn "altivec_lvx_<mode>"
+(define_expand "altivec_lvx_<mode>"
+  [(parallel
+    [(set (match_operand:VM2 0 "register_operand" "=v")
+         (match_operand:VM2 1 "memory_operand" "Z"))
+     (unspec [(const_int 0)] UNSPEC_LVX)])]
+  "TARGET_ALTIVEC"
+{
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    {
+      altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_LVX);
+      DONE;
+    }
+})
+
+(define_insn "*altivec_lvx_<mode>_internal"
   [(parallel
     [(set (match_operand:VM2 0 "register_operand" "=v")
          (match_operand:VM2 1 "memory_operand" "Z"))
   "lvx %0,%y1"
   [(set_attr "type" "vecload")])
 
-(define_insn "altivec_stvx_<mode>"
+(define_expand "altivec_stvx_<mode>"
+  [(parallel
+    [(set (match_operand:VM2 0 "memory_operand" "=Z")
+         (match_operand:VM2 1 "register_operand" "v"))
+     (unspec [(const_int 0)] UNSPEC_STVX)])]
+  "TARGET_ALTIVEC"
+{
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    {
+      altivec_expand_stvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVX);
+      DONE;
+    }
+})
+
+(define_insn "*altivec_stvx_<mode>_internal"
   [(parallel
     [(set (match_operand:VM2 0 "memory_operand" "=Z")
          (match_operand:VM2 1 "register_operand" "v"))
   "stvx %1,%y0"
   [(set_attr "type" "vecstore")])
 
-(define_insn "altivec_stvxl"
+(define_expand "altivec_stvxl_<mode>"
+  [(parallel
+    [(set (match_operand:VM2 0 "memory_operand" "=Z")
+         (match_operand:VM2 1 "register_operand" "v"))
+     (unspec [(const_int 0)] UNSPEC_STVXL)])]
+  "TARGET_ALTIVEC"
+{
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    {
+      altivec_expand_stvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVXL);
+      DONE;
+    }
+})
+
+(define_insn "*altivec_stvxl_<mode>_internal"
   [(parallel
-    [(set (match_operand:V4SI 0 "memory_operand" "=Z")
-         (match_operand:V4SI 1 "register_operand" "v"))
+    [(set (match_operand:VM2 0 "memory_operand" "=Z")
+         (match_operand:VM2 1 "register_operand" "v"))
      (unspec [(const_int 0)] UNSPEC_STVXL)])]
   "TARGET_ALTIVEC"
   "stvxl %1,%y0"
index b7b67fae1c8f0be289607915b2684e352e3640b2..46df66b7b73e45b9ffae119cd7ea29fc6b2aa07f 100644 (file)
@@ -793,8 +793,26 @@ BU_ALTIVEC_X (LVEBX,               "lvebx",            MEM)
 BU_ALTIVEC_X (LVEHX,           "lvehx",            MEM)
 BU_ALTIVEC_X (LVEWX,           "lvewx",            MEM)
 BU_ALTIVEC_X (LVXL,            "lvxl",             MEM)
+BU_ALTIVEC_X (LVXL_V2DF,       "lvxl_v2df",        MEM)
+BU_ALTIVEC_X (LVXL_V2DI,       "lvxl_v2di",        MEM)
+BU_ALTIVEC_X (LVXL_V4SF,       "lvxl_v4sf",        MEM)
+BU_ALTIVEC_X (LVXL_V4SI,       "lvxl_v4si",        MEM)
+BU_ALTIVEC_X (LVXL_V8HI,       "lvxl_v8hi",        MEM)
+BU_ALTIVEC_X (LVXL_V16QI,      "lvxl_v16qi",       MEM)
 BU_ALTIVEC_X (LVX,             "lvx",              MEM)
+BU_ALTIVEC_X (LVX_V2DF,                "lvx_v2df",         MEM)
+BU_ALTIVEC_X (LVX_V2DI,                "lvx_v2di",         MEM)
+BU_ALTIVEC_X (LVX_V4SF,                "lvx_v4sf",         MEM)
+BU_ALTIVEC_X (LVX_V4SI,                "lvx_v4si",         MEM)
+BU_ALTIVEC_X (LVX_V8HI,                "lvx_v8hi",         MEM)
+BU_ALTIVEC_X (LVX_V16QI,       "lvx_v16qi",        MEM)
 BU_ALTIVEC_X (STVX,            "stvx",             MEM)
+BU_ALTIVEC_X (STVX_V2DF,       "stvx_v2df",        MEM)
+BU_ALTIVEC_X (STVX_V2DI,       "stvx_v2di",        MEM)
+BU_ALTIVEC_X (STVX_V4SF,       "stvx_v4sf",        MEM)
+BU_ALTIVEC_X (STVX_V4SI,       "stvx_v4si",        MEM)
+BU_ALTIVEC_X (STVX_V8HI,       "stvx_v8hi",        MEM)
+BU_ALTIVEC_X (STVX_V16QI,      "stvx_v16qi",       MEM)
 BU_ALTIVEC_C (LVLX,            "lvlx",             MEM)
 BU_ALTIVEC_C (LVLXL,           "lvlxl",            MEM)
 BU_ALTIVEC_C (LVRX,            "lvrx",             MEM)
@@ -803,6 +821,12 @@ BU_ALTIVEC_X (STVEBX,              "stvebx",           MEM)
 BU_ALTIVEC_X (STVEHX,          "stvehx",           MEM)
 BU_ALTIVEC_X (STVEWX,          "stvewx",           MEM)
 BU_ALTIVEC_X (STVXL,           "stvxl",            MEM)
+BU_ALTIVEC_X (STVXL_V2DF,      "stvxl_v2df",       MEM)
+BU_ALTIVEC_X (STVXL_V2DI,      "stvxl_v2di",       MEM)
+BU_ALTIVEC_X (STVXL_V4SF,      "stvxl_v4sf",       MEM)
+BU_ALTIVEC_X (STVXL_V4SI,      "stvxl_v4si",       MEM)
+BU_ALTIVEC_X (STVXL_V8HI,      "stvxl_v8hi",       MEM)
+BU_ALTIVEC_X (STVXL_V16QI,     "stvxl_v16qi",      MEM)
 BU_ALTIVEC_C (STVLX,           "stvlx",            MEM)
 BU_ALTIVEC_C (STVLXL,          "stvlxl",           MEM)
 BU_ALTIVEC_C (STVRX,           "stvrx",            MEM)
index acdd4b497771cc9e10caa02d49e3852e1871a73a..73edd2b895aa4ee590faa3b6d350c199bd6a881b 100644 (file)
@@ -1108,54 +1108,54 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
     RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
   { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_XVDIVDP,
     RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DF,
     RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI,
     RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI,
     RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
     ~RS6000_BTI_unsigned_V2DI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI,
     RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V2DI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SF,
     RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SF,
     RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI,
     RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI,
     RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI,
     RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI,
     RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI,
     RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI,
     RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI,
     RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI,
     RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI,
     RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI,
     RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI,
     RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI,
     RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI,
     RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI,
     RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI,
     RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI,
     RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI,
     RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+  { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI,
     RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
   { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX,
     RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
@@ -1193,55 +1193,55 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
     RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
   { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX,
     RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SF,
     RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SF,
     RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI,
     RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI,
     RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI,
     RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI,
     RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI,
     RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI,
     RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI,
     RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI,
     RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI,
     RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI,
     RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI,
     RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI,
     RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI,
     RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI,
     RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI,
     RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI,
     RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI,
     RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
     ~RS6000_BTI_unsigned_V16QI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI,
     RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DF,
     RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI,
     RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI,
     RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
     ~RS6000_BTI_unsigned_V2DI, 0 },
-  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+  { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI,
     RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V2DI, 0 },
   { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
     RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
@@ -2857,63 +2857,63 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
     RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_NOT_OPAQUE },
   { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
     RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_NOT_OPAQUE },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DF,
     RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI,
     RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
     ~RS6000_BTI_unsigned_V2DI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI,
     RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI,
     ~RS6000_BTI_bool_V2DI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SF,
     RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SF,
     RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI,
     RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI,
     RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI,
     RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI,
     RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI,
     RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI,
     RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI,
     RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI,
     RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI,
     RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI,
     RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI,
     RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI,
     RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI,
     RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI,
     RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI,
     RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
-  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+  { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI,
     RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
   { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
     RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
@@ -2985,64 +2985,64 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
     RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
   { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
     RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SF,
     RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SF,
     RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI,
     RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI,
     RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI,
     RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI,
     RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI,
     RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI,
     RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI,
     RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI,
     RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI,
     RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI,
     RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI,
     RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI,
     RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI,
     RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI,
     RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI,
     RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI,
     RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DF,
     RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DF,
     RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DI,
     RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DI,
     RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
     ~RS6000_BTI_unsigned_V2DI },
-  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+  { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DI,
     RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI,
     ~RS6000_BTI_bool_V2DI },
   { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
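
Illustration (not from the commit): the retyped table entries above make each vec_ld/vec_ldl/vec_st/vec_stl overload resolve to a mode-specific built-in instead of the single V4SI-flavored one, so a vec_ld of vector double now reaches ALTIVEC_BUILTIN_LVX_V2DF and hence the V2DF expander. A source-level sketch (identifiers are illustrative; vector double requires VSX):

  #include <altivec.h>

  static double sd[2] __attribute__ ((aligned (16)));

  vector double
  load_v2df (void)
  {
    /* Per the overload table above, this call maps to
       ALTIVEC_BUILTIN_LVX_V2DF rather than the generic
       ALTIVEC_BUILTIN_LVX.  */
    return vec_ld (0, (vector double *) sd);
  }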
index 9e91081340e2329ee1c67b46d1214ccbd73468b5..84d466ec890054ce14f1e2d215f43def51240d22 100644 (file)
@@ -58,6 +58,8 @@ extern void rs6000_expand_vector_extract (rtx, rtx, int);
 extern bool altivec_expand_vec_perm_const (rtx op[4]);
 extern void altivec_expand_vec_perm_le (rtx op[4]);
 extern bool rs6000_expand_vec_perm_const (rtx op[4]);
+extern void altivec_expand_lvx_be (rtx, rtx, enum machine_mode, unsigned);
+extern void altivec_expand_stvx_be (rtx, rtx, enum machine_mode, unsigned);
 extern void rs6000_expand_extract_even (rtx, rtx, rtx);
 extern void rs6000_expand_interleave (rtx, rtx, rtx, bool);
 extern void build_mask64_2_operands (rtx, rtx *);
index ce9cea6a509d53ef5fc4188dbadd8400bf596426..a9c99bdc1ccf2a7983a64cdbdc6d5c1977e6abff 100644 (file)
@@ -11808,6 +11808,83 @@ paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
   return target;
 }
 
+/* Return a constant vector for use as a little-endian permute control vector
+   to reverse the order of elements of the given vector mode.  */
+static rtx
+swap_selector_for_mode (enum machine_mode mode)
+{
+  /* These are little endian vectors, so their elements are reversed
+     from what you would normally expect for a permute control vector.  */
+  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
+  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
+  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
+  unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  unsigned int *swaparray, i;
+  rtx perm[16];
+
+  switch (mode)
+    {
+    case V2DFmode:
+    case V2DImode:
+      swaparray = swap2;
+      break;
+    case V4SFmode:
+    case V4SImode:
+      swaparray = swap4;
+      break;
+    case V8HImode:
+      swaparray = swap8;
+      break;
+    case V16QImode:
+      swaparray = swap16;
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  for (i = 0; i < 16; ++i)
+    perm[i] = GEN_INT (swaparray[i]);
+
+  return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
+}
+
+/* Generate code for an "lvx" or "lvxl" built-in for a little endian target
+   with -maltivec=be specified.  Issue the load followed by an element-reversing
+   permute.  */
+void
+altivec_expand_lvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
+{
+  rtx tmp = gen_reg_rtx (mode);
+  rtx load = gen_rtx_SET (VOIDmode, tmp, op1);
+  rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
+  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
+  rtx sel = swap_selector_for_mode (mode);
+  rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
+
+  gcc_assert (REG_P (op0));
+  emit_insn (par);
+  emit_insn (gen_rtx_SET (VOIDmode, op0, vperm));
+}
+
+/* Generate code for a "stvx" or "stvxl" built-in for a little endian target
+   with -maltivec=be specified.  Issue the store preceded by an element-reversing
+   permute.  */
+void
+altivec_expand_stvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
+{
+  rtx tmp = gen_reg_rtx (mode);
+  rtx store = gen_rtx_SET (VOIDmode, op0, tmp);
+  rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
+  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
+  rtx sel = swap_selector_for_mode (mode);
+  rtx vperm;
+
+  gcc_assert (REG_P (op1));
+  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
+  emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
+  emit_insn (par);
+}
+
 static rtx
 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
 {
@@ -12597,16 +12674,38 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
 
   switch (fcode)
     {
+    case ALTIVEC_BUILTIN_STVX_V2DF:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
+    case ALTIVEC_BUILTIN_STVX_V2DI:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
+    case ALTIVEC_BUILTIN_STVX_V4SF:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
     case ALTIVEC_BUILTIN_STVX:
+    case ALTIVEC_BUILTIN_STVX_V4SI:
       return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
+    case ALTIVEC_BUILTIN_STVX_V8HI:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
+    case ALTIVEC_BUILTIN_STVX_V16QI:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
     case ALTIVEC_BUILTIN_STVEBX:
       return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
     case ALTIVEC_BUILTIN_STVEHX:
       return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
     case ALTIVEC_BUILTIN_STVEWX:
       return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
+    case ALTIVEC_BUILTIN_STVXL_V2DF:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
+    case ALTIVEC_BUILTIN_STVXL_V2DI:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
+    case ALTIVEC_BUILTIN_STVXL_V4SF:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
     case ALTIVEC_BUILTIN_STVXL:
-      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
+    case ALTIVEC_BUILTIN_STVXL_V4SI:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
+    case ALTIVEC_BUILTIN_STVXL_V8HI:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
+    case ALTIVEC_BUILTIN_STVXL_V16QI:
+      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
 
     case ALTIVEC_BUILTIN_STVLX:
       return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
@@ -12750,12 +12849,44 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
     case ALTIVEC_BUILTIN_LVEWX:
       return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
                                        exp, target, false);
+    case ALTIVEC_BUILTIN_LVXL_V2DF:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
+                                       exp, target, false);
+    case ALTIVEC_BUILTIN_LVXL_V2DI:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
+                                       exp, target, false);
+    case ALTIVEC_BUILTIN_LVXL_V4SF:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
+                                       exp, target, false);
     case ALTIVEC_BUILTIN_LVXL:
-      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
+    case ALTIVEC_BUILTIN_LVXL_V4SI:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
+                                       exp, target, false);
+    case ALTIVEC_BUILTIN_LVXL_V8HI:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
+                                       exp, target, false);
+    case ALTIVEC_BUILTIN_LVXL_V16QI:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
+                                       exp, target, false);
+    case ALTIVEC_BUILTIN_LVX_V2DF:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
+                                       exp, target, false);
+    case ALTIVEC_BUILTIN_LVX_V2DI:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
+                                       exp, target, false);
+    case ALTIVEC_BUILTIN_LVX_V4SF:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
                                        exp, target, false);
     case ALTIVEC_BUILTIN_LVX:
+    case ALTIVEC_BUILTIN_LVX_V4SI:
       return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
                                        exp, target, false);
+    case ALTIVEC_BUILTIN_LVX_V8HI:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
+                                       exp, target, false);
+    case ALTIVEC_BUILTIN_LVX_V16QI:
+      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
+                                       exp, target, false);
     case ALTIVEC_BUILTIN_LVLX:
       return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
                                        exp, target, true);
@@ -14085,10 +14216,58 @@ altivec_init_builtins (void)
   def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
   def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
   def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
+  def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVXL_V2DF);
+  def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVXL_V2DI);
+  def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVXL_V4SF);
+  def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVXL_V4SI);
+  def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVXL_V8HI);
+  def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVXL_V16QI);
   def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
+  def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVX_V2DF);
+  def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVX_V2DI);
+  def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVX_V4SF);
+  def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVX_V4SI);
+  def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVX_V8HI);
+  def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
+              ALTIVEC_BUILTIN_LVX_V16QI);
   def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
+  def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
+              ALTIVEC_BUILTIN_STVX_V2DF);
+  def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
+              ALTIVEC_BUILTIN_STVX_V2DI);
+  def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
+              ALTIVEC_BUILTIN_STVX_V4SF);
+  def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
+              ALTIVEC_BUILTIN_STVX_V4SI);
+  def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
+              ALTIVEC_BUILTIN_STVX_V8HI);
+  def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
+              ALTIVEC_BUILTIN_STVX_V16QI);
   def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
   def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
+  def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
+              ALTIVEC_BUILTIN_STVXL_V2DF);
+  def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
+              ALTIVEC_BUILTIN_STVXL_V2DI);
+  def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
+              ALTIVEC_BUILTIN_STVXL_V4SF);
+  def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
+              ALTIVEC_BUILTIN_STVXL_V4SI);
+  def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
+              ALTIVEC_BUILTIN_STVXL_V8HI);
+  def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
+              ALTIVEC_BUILTIN_STVXL_V16QI);
   def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
   def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
   def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
diff --git a/gcc/testsuite/gcc.dg/vmx/ld-be-order.c b/gcc/testsuite/gcc.dg/vmx/ld-be-order.c
new file mode 100644 (file)
index 0000000..903b997
--- /dev/null
@@ -0,0 +1,107 @@
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
+
+#include "harness.h"
+
+static unsigned char svuc[16] __attribute__ ((aligned (16)));
+static signed char svsc[16] __attribute__ ((aligned (16)));
+static unsigned char svbc[16] __attribute__ ((aligned (16)));
+static unsigned short svus[8] __attribute__ ((aligned (16)));
+static signed short svss[8] __attribute__ ((aligned (16)));
+static unsigned short svbs[8] __attribute__ ((aligned (16)));
+static unsigned short svp[8] __attribute__ ((aligned (16)));
+static unsigned int svui[4] __attribute__ ((aligned (16)));
+static signed int svsi[4] __attribute__ ((aligned (16)));
+static unsigned int svbi[4] __attribute__ ((aligned (16)));
+static float svf[4] __attribute__ ((aligned (16)));
+
+static void init ()
+{
+  unsigned int i;
+  for (i = 0; i < 16; ++i)
+    {
+      svuc[i] = i;
+      svsc[i] = i - 8;
+      svbc[i] = (i % 2) ? 0xff : 0;
+    }
+  for (i = 0; i < 8; ++i)
+    {
+      svus[i] = i;
+      svss[i] = i - 4;
+      svbs[i] = (i % 2) ? 0xffff : 0;
+      svp[i] = i;
+    }
+  for (i = 0; i < 4; ++i)
+    {
+      svui[i] = i;
+      svsi[i] = i - 2;
+      svbi[i] = (i % 2) ? 0xffffffff : 0;
+      svf[i] = i * 1.0f;
+    }
+}
+
+static void test ()
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned char evuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  vector signed char evsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
+  vector bool char evbc = {255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0};
+  vector unsigned short evus = {7,6,5,4,3,2,1,0};
+  vector signed short evss = {3,2,1,0,-1,-2,-3,-4};
+  vector bool short evbs = {65535,0,65535,0,65535,0,65535,0};
+  vector pixel evp = {7,6,5,4,3,2,1,0};
+  vector unsigned int evui = {3,2,1,0};
+  vector signed int evsi = {1,0,-1,-2};
+  vector bool int evbi = {0xffffffff,0,0xffffffff,0};
+  vector float evf = {3.0,2.0,1.0,0.0};
+#else
+  vector unsigned char evuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char evsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector bool char evbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
+  vector unsigned short evus = {0,1,2,3,4,5,6,7};
+  vector signed short evss = {-4,-3,-2,-1,0,1,2,3};
+  vector bool short evbs = {0,65535,0,65535,0,65535,0,65535};
+  vector pixel evp = {0,1,2,3,4,5,6,7};
+  vector unsigned int evui = {0,1,2,3};
+  vector signed int evsi = {-2,-1,0,1};
+  vector bool int evbi = {0,0xffffffff,0,0xffffffff};
+  vector float evf = {0.0,1.0,2.0,3.0};
+#endif
+
+  vector unsigned char vuc;
+  vector signed char vsc;
+  vector bool char vbc;
+  vector unsigned short vus;
+  vector signed short vss;
+  vector bool short vbs;
+  vector pixel vp;
+  vector unsigned int vui;
+  vector signed int vsi;
+  vector bool int vbi;
+  vector float vf;
+
+  init ();
+
+  vuc = vec_ld (0, (vector unsigned char *)svuc);
+  vsc = vec_ld (0, (vector signed char *)svsc);
+  vbc = vec_ld (0, (vector bool char *)svbc);
+  vus = vec_ld (0, (vector unsigned short *)svus);
+  vss = vec_ld (0, (vector signed short *)svss);
+  vbs = vec_ld (0, (vector bool short *)svbs);
+  vp  = vec_ld (0, (vector pixel *)svp);
+  vui = vec_ld (0, (vector unsigned int *)svui);
+  vsi = vec_ld (0, (vector signed int *)svsi);
+  vbi = vec_ld (0, (vector bool int *)svbi);
+  vf  = vec_ld (0, (vector float *)svf);
+
+  check (vec_all_eq (vuc, evuc), "vuc");
+  check (vec_all_eq (vsc, evsc), "vsc");
+  check (vec_all_eq (vbc, evbc), "vbc");
+  check (vec_all_eq (vus, evus), "vus");
+  check (vec_all_eq (vss, evss), "vss");
+  check (vec_all_eq (vbs, evbs), "vbs");
+  check (vec_all_eq (vp,  evp ), "vp" );
+  check (vec_all_eq (vui, evui), "vui");
+  check (vec_all_eq (vsi, evsi), "vsi");
+  check (vec_all_eq (vbi, evbi), "vbi");
+  check (vec_all_eq (vf,  evf ), "vf" );
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ld-vsx-be-order.c b/gcc/testsuite/gcc.dg/vmx/ld-vsx-be-order.c
new file mode 100644 (file)
index 0000000..64ea0ba
--- /dev/null
@@ -0,0 +1,40 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static unsigned long svul[2] __attribute__ ((aligned (16)));
+static double svd[2] __attribute__ ((aligned (16)));
+
+static void init ()
+{
+  unsigned int i;
+  for (i = 0; i < 2; ++i)
+    {
+      svul[i] = i;
+      svd[i] = i * 1.0;
+    }
+}
+
+static void test ()
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned long evul = {1,0};
+  vector double evd = {1.0,0.0};
+#else
+  vector unsigned long evul = {0,1};
+  vector double evd = {0.0,1.0};
+#endif
+
+  vector unsigned long vul;
+  vector double vd;
+
+  init ();
+
+  vul = vec_ld (0, (vector unsigned long *)svul);
+  vd  = vec_ld (0, (vector double *)svd);
+
+  check (vec_all_eq (vul, evul), "vul");
+  check (vec_all_eq (vd,  evd ), "vd" );
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ld-vsx.c b/gcc/testsuite/gcc.dg/vmx/ld-vsx.c
new file mode 100644 (file)
index 0000000..87da7bf
--- /dev/null
@@ -0,0 +1,35 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static unsigned long svul[2] __attribute__ ((aligned (16)));
+static double svd[2] __attribute__ ((aligned (16)));
+
+static void init ()
+{
+  unsigned int i;
+  for (i = 0; i < 2; ++i)
+    {
+      svul[i] = i;
+      svd[i] = i * 1.0;
+    }
+}
+
+static void test ()
+{
+  vector unsigned long evul = {0,1};
+  vector double evd = {0.0,1.0};
+
+  vector unsigned long vul;
+  vector double vd;
+
+  init ();
+
+  vul = vec_ld (0, (vector unsigned long *)svul);
+  vd  = vec_ld (0, (vector double *)svd);
+
+  check (vec_all_eq (vul, evul), "vul");
+  check (vec_all_eq (vd,  evd ), "vd" );
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ld.c b/gcc/testsuite/gcc.dg/vmx/ld.c
new file mode 100644 (file)
index 0000000..851fbd5
--- /dev/null
@@ -0,0 +1,91 @@
+#include "harness.h"
+
+static unsigned char svuc[16] __attribute__ ((aligned (16)));
+static signed char svsc[16] __attribute__ ((aligned (16)));
+static unsigned char svbc[16] __attribute__ ((aligned (16)));
+static unsigned short svus[8] __attribute__ ((aligned (16)));
+static signed short svss[8] __attribute__ ((aligned (16)));
+static unsigned short svbs[8] __attribute__ ((aligned (16)));
+static unsigned short svp[8] __attribute__ ((aligned (16)));
+static unsigned int svui[4] __attribute__ ((aligned (16)));
+static signed int svsi[4] __attribute__ ((aligned (16)));
+static unsigned int svbi[4] __attribute__ ((aligned (16)));
+static float svf[4] __attribute__ ((aligned (16)));
+
+static void init ()
+{
+  unsigned int i;
+  for (i = 0; i < 16; ++i)
+    {
+      svuc[i] = i;
+      svsc[i] = i - 8;
+      svbc[i] = (i % 2) ? 0xff : 0;
+    }
+  for (i = 0; i < 8; ++i)
+    {
+      svus[i] = i;
+      svss[i] = i - 4;
+      svbs[i] = (i % 2) ? 0xffff : 0;
+      svp[i] = i;
+    }
+  for (i = 0; i < 4; ++i)
+    {
+      svui[i] = i;
+      svsi[i] = i - 2;
+      svbi[i] = (i % 2) ? 0xffffffff : 0;
+      svf[i] = i * 1.0f;
+    }
+}
+
+static void test ()
+{
+  vector unsigned char evuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char evsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector bool char evbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
+  vector unsigned short evus = {0,1,2,3,4,5,6,7};
+  vector signed short evss = {-4,-3,-2,-1,0,1,2,3};
+  vector bool short evbs = {0,65535,0,65535,0,65535,0,65535};
+  vector pixel evp = {0,1,2,3,4,5,6,7};
+  vector unsigned int evui = {0,1,2,3};
+  vector signed int evsi = {-2,-1,0,1};
+  vector bool int evbi = {0,0xffffffff,0,0xffffffff};
+  vector float evf = {0.0,1.0,2.0,3.0};
+
+  vector unsigned char vuc;
+  vector signed char vsc;
+  vector bool char vbc;
+  vector unsigned short vus;
+  vector signed short vss;
+  vector bool short vbs;
+  vector pixel vp;
+  vector unsigned int vui;
+  vector signed int vsi;
+  vector bool int vbi;
+  vector float vf;
+
+  init ();
+
+  vuc = vec_ld (0, (vector unsigned char *)svuc);
+  vsc = vec_ld (0, (vector signed char *)svsc);
+  vbc = vec_ld (0, (vector bool char *)svbc);
+  vus = vec_ld (0, (vector unsigned short *)svus);
+  vss = vec_ld (0, (vector signed short *)svss);
+  vbs = vec_ld (0, (vector bool short *)svbs);
+  vp  = vec_ld (0, (vector pixel *)svp);
+  vui = vec_ld (0, (vector unsigned int *)svui);
+  vsi = vec_ld (0, (vector signed int *)svsi);
+  vbi = vec_ld (0, (vector bool int *)svbi);
+  vf  = vec_ld (0, (vector float *)svf);
+
+  check (vec_all_eq (vuc, evuc), "vuc");
+  check (vec_all_eq (vsc, evsc), "vsc");
+  check (vec_all_eq (vbc, evbc), "vbc");
+  check (vec_all_eq (vus, evus), "vus");
+  check (vec_all_eq (vss, evss), "vss");
+  check (vec_all_eq (vbs, evbs), "vbs");
+  check (vec_all_eq (vp,  evp ), "vp" );
+  check (vec_all_eq (vui, evui), "vui");
+  check (vec_all_eq (vsi, evsi), "vsi");
+  check (vec_all_eq (vbi, evbi), "vbi");
+  check (vec_all_eq (vf,  evf ), "vf" );
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ldl-be-order.c b/gcc/testsuite/gcc.dg/vmx/ldl-be-order.c
new file mode 100644 (file)
index 0000000..397849f
--- /dev/null
@@ -0,0 +1,107 @@
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
+
+#include "harness.h"
+
+static unsigned char svuc[16] __attribute__ ((aligned (16)));
+static signed char svsc[16] __attribute__ ((aligned (16)));
+static unsigned char svbc[16] __attribute__ ((aligned (16)));
+static unsigned short svus[8] __attribute__ ((aligned (16)));
+static signed short svss[8] __attribute__ ((aligned (16)));
+static unsigned short svbs[8] __attribute__ ((aligned (16)));
+static unsigned short svp[8] __attribute__ ((aligned (16)));
+static unsigned int svui[4] __attribute__ ((aligned (16)));
+static signed int svsi[4] __attribute__ ((aligned (16)));
+static unsigned int svbi[4] __attribute__ ((aligned (16)));
+static float svf[4] __attribute__ ((aligned (16)));
+
+static void init ()
+{
+  unsigned int i;
+  for (i = 0; i < 16; ++i)
+    {
+      svuc[i] = i;
+      svsc[i] = i - 8;
+      svbc[i] = (i % 2) ? 0xff : 0;
+    }
+  for (i = 0; i < 8; ++i)
+    {
+      svus[i] = i;
+      svss[i] = i - 4;
+      svbs[i] = (i % 2) ? 0xffff : 0;
+      svp[i] = i;
+    }
+  for (i = 0; i < 4; ++i)
+    {
+      svui[i] = i;
+      svsi[i] = i - 2;
+      svbi[i] = (i % 2) ? 0xffffffff : 0;
+      svf[i] = i * 1.0f;
+    }
+}
+
+static void test ()
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned char evuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  vector signed char evsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
+  vector bool char evbc = {255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0};
+  vector unsigned short evus = {7,6,5,4,3,2,1,0};
+  vector signed short evss = {3,2,1,0,-1,-2,-3,-4};
+  vector bool short evbs = {65535,0,65535,0,65535,0,65535,0};
+  vector pixel evp = {7,6,5,4,3,2,1,0};
+  vector unsigned int evui = {3,2,1,0};
+  vector signed int evsi = {1,0,-1,-2};
+  vector bool int evbi = {0xffffffff,0,0xffffffff,0};
+  vector float evf = {3.0,2.0,1.0,0.0};
+#else
+  vector unsigned char evuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char evsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector bool char evbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
+  vector unsigned short evus = {0,1,2,3,4,5,6,7};
+  vector signed short evss = {-4,-3,-2,-1,0,1,2,3};
+  vector bool short evbs = {0,65535,0,65535,0,65535,0,65535};
+  vector pixel evp = {0,1,2,3,4,5,6,7};
+  vector unsigned int evui = {0,1,2,3};
+  vector signed int evsi = {-2,-1,0,1};
+  vector bool int evbi = {0,0xffffffff,0,0xffffffff};
+  vector float evf = {0.0,1.0,2.0,3.0};
+#endif
+
+  vector unsigned char vuc;
+  vector signed char vsc;
+  vector bool char vbc;
+  vector unsigned short vus;
+  vector signed short vss;
+  vector bool short vbs;
+  vector pixel vp;
+  vector unsigned int vui;
+  vector signed int vsi;
+  vector bool int vbi;
+  vector float vf;
+
+  init ();
+
+  vuc = vec_ldl (0, (vector unsigned char *)svuc);
+  vsc = vec_ldl (0, (vector signed char *)svsc);
+  vbc = vec_ldl (0, (vector bool char *)svbc);
+  vus = vec_ldl (0, (vector unsigned short *)svus);
+  vss = vec_ldl (0, (vector signed short *)svss);
+  vbs = vec_ldl (0, (vector bool short *)svbs);
+  vp  = vec_ldl (0, (vector pixel *)svp);
+  vui = vec_ldl (0, (vector unsigned int *)svui);
+  vsi = vec_ldl (0, (vector signed int *)svsi);
+  vbi = vec_ldl (0, (vector bool int *)svbi);
+  vf  = vec_ldl (0, (vector float *)svf);
+
+  check (vec_all_eq (vuc, evuc), "vuc");
+  check (vec_all_eq (vsc, evsc), "vsc");
+  check (vec_all_eq (vbc, evbc), "vbc");
+  check (vec_all_eq (vus, evus), "vus");
+  check (vec_all_eq (vss, evss), "vss");
+  check (vec_all_eq (vbs, evbs), "vbs");
+  check (vec_all_eq (vp,  evp ), "vp" );
+  check (vec_all_eq (vui, evui), "vui");
+  check (vec_all_eq (vsi, evsi), "vsi");
+  check (vec_all_eq (vbi, evbi), "vbi");
+  check (vec_all_eq (vf,  evf ), "vf" );
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ldl-vsx-be-order.c b/gcc/testsuite/gcc.dg/vmx/ldl-vsx-be-order.c
new file mode 100644 (file)
index 0000000..6a5cdd4
--- /dev/null
@@ -0,0 +1,40 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static unsigned long svul[2] __attribute__ ((aligned (16)));
+static double svd[2] __attribute__ ((aligned (16)));
+
+static void init ()
+{
+  unsigned int i;
+  for (i = 0; i < 2; ++i)
+    {
+      svul[i] = i;
+      svd[i] = i * 1.0;
+    }
+}
+
+static void test ()
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned long evul = {1,0};
+  vector double evd = {1.0,0.0};
+#else
+  vector unsigned long evul = {0,1};
+  vector double evd = {0.0,1.0};
+#endif
+
+  vector unsigned long vul;
+  vector double vd;
+
+  init ();
+
+  vul = vec_ldl (0, (vector unsigned long *)svul);
+  vd  = vec_ldl (0, (vector double *)svd);
+
+  check (vec_all_eq (vul, evul), "vul");
+  check (vec_all_eq (vd,  evd ), "vd" );
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ldl-vsx.c b/gcc/testsuite/gcc.dg/vmx/ldl-vsx.c
new file mode 100644
index 0000000..c330413
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ldl-vsx.c
@@ -0,0 +1,35 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static unsigned long svul[2] __attribute__ ((aligned (16)));
+static double svd[2] __attribute__ ((aligned (16)));
+
+static void init ()
+{
+  unsigned int i;
+  for (i = 0; i < 2; ++i)
+    {
+      svul[i] = i;
+      svd[i] = i * 1.0;
+    }
+}
+
+static void test ()
+{
+  vector unsigned long evul = {0,1};
+  vector double evd = {0.0,1.0};
+
+  vector unsigned long vul;
+  vector double vd;
+
+  init ();
+
+  vul = vec_ldl (0, (vector unsigned long *)svul);
+  vd  = vec_ldl (0, (vector double *)svd);
+
+  check (vec_all_eq (vul, evul), "vul");
+  check (vec_all_eq (vd,  evd ), "vd" );
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ldl.c b/gcc/testsuite/gcc.dg/vmx/ldl.c
new file mode 100644
index 0000000..3f9a603
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ldl.c
@@ -0,0 +1,91 @@
+#include "harness.h"
+
+static unsigned char svuc[16] __attribute__ ((aligned (16)));
+static signed char svsc[16] __attribute__ ((aligned (16)));
+static unsigned char svbc[16] __attribute__ ((aligned (16)));
+static unsigned short svus[8] __attribute__ ((aligned (16)));
+static signed short svss[8] __attribute__ ((aligned (16)));
+static unsigned short svbs[8] __attribute__ ((aligned (16)));
+static unsigned short svp[8] __attribute__ ((aligned (16)));
+static unsigned int svui[4] __attribute__ ((aligned (16)));
+static signed int svsi[4] __attribute__ ((aligned (16)));
+static unsigned int svbi[4] __attribute__ ((aligned (16)));
+static float svf[4] __attribute__ ((aligned (16)));
+
+static void init ()
+{
+  unsigned int i;
+  for (i = 0; i < 16; ++i)
+    {
+      svuc[i] = i;
+      svsc[i] = i - 8;
+      svbc[i] = (i % 2) ? 0xff : 0;
+    }
+  for (i = 0; i < 8; ++i)
+    {
+      svus[i] = i;
+      svss[i] = i - 4;
+      svbs[i] = (i % 2) ? 0xffff : 0;
+      svp[i] = i;
+    }
+  for (i = 0; i < 4; ++i)
+    {
+      svui[i] = i;
+      svsi[i] = i - 2;
+      svbi[i] = (i % 2) ? 0xffffffff : 0;
+      svf[i] = i * 1.0f;
+    }
+}
+
+static void test ()
+{
+  vector unsigned char evuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char evsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector bool char evbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
+  vector unsigned short evus = {0,1,2,3,4,5,6,7};
+  vector signed short evss = {-4,-3,-2,-1,0,1,2,3};
+  vector bool short evbs = {0,65535,0,65535,0,65535,0,65535};
+  vector pixel evp = {0,1,2,3,4,5,6,7};
+  vector unsigned int evui = {0,1,2,3};
+  vector signed int evsi = {-2,-1,0,1};
+  vector bool int evbi = {0,0xffffffff,0,0xffffffff};
+  vector float evf = {0.0,1.0,2.0,3.0};
+
+  vector unsigned char vuc;
+  vector signed char vsc;
+  vector bool char vbc;
+  vector unsigned short vus;
+  vector signed short vss;
+  vector bool short vbs;
+  vector pixel vp;
+  vector unsigned int vui;
+  vector signed int vsi;
+  vector bool int vbi;
+  vector float vf;
+
+  init ();
+
+  vuc = vec_ldl (0, (vector unsigned char *)svuc);
+  vsc = vec_ldl (0, (vector signed char *)svsc);
+  vbc = vec_ldl (0, (vector bool char *)svbc);
+  vus = vec_ldl (0, (vector unsigned short *)svus);
+  vss = vec_ldl (0, (vector signed short *)svss);
+  vbs = vec_ldl (0, (vector bool short *)svbs);
+  vp  = vec_ldl (0, (vector pixel *)svp);
+  vui = vec_ldl (0, (vector unsigned int *)svui);
+  vsi = vec_ldl (0, (vector signed int *)svsi);
+  vbi = vec_ldl (0, (vector bool int *)svbi);
+  vf  = vec_ldl (0, (vector float *)svf);
+
+  check (vec_all_eq (vuc, evuc), "vuc");
+  check (vec_all_eq (vsc, evsc), "vsc");
+  check (vec_all_eq (vbc, evbc), "vbc");
+  check (vec_all_eq (vus, evus), "vus");
+  check (vec_all_eq (vss, evss), "vss");
+  check (vec_all_eq (vbs, evbs), "vbs");
+  check (vec_all_eq (vp,  evp ), "vp" );
+  check (vec_all_eq (vui, evui), "vui");
+  check (vec_all_eq (vsi, evsi), "vsi");
+  check (vec_all_eq (vbi, evbi), "vbi");
+  check (vec_all_eq (vf,  evf ), "vf" );
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/st-be-order.c b/gcc/testsuite/gcc.dg/vmx/st-be-order.c
new file mode 100644
index 0000000..1a7b01b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/st-be-order.c
@@ -0,0 +1,83 @@
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
+
+#include "harness.h"
+
+static unsigned char svuc[16] __attribute__ ((aligned (16)));
+static signed char svsc[16] __attribute__ ((aligned (16)));
+static unsigned char svbc[16] __attribute__ ((aligned (16)));
+static unsigned short svus[8] __attribute__ ((aligned (16)));
+static signed short svss[8] __attribute__ ((aligned (16)));
+static unsigned short svbs[8] __attribute__ ((aligned (16)));
+static unsigned short svp[8] __attribute__ ((aligned (16)));
+static unsigned int svui[4] __attribute__ ((aligned (16)));
+static signed int svsi[4] __attribute__ ((aligned (16)));
+static unsigned int svbi[4] __attribute__ ((aligned (16)));
+static float svf[4] __attribute__ ((aligned (16)));
+
+static void check_arrays ()
+{
+  unsigned int i;
+  for (i = 0; i < 16; ++i)
+    {
+      check (svuc[i] == i, "svuc");
+      check (svsc[i] == i - 8, "svsc");
+      check (svbc[i] == ((i % 2) ? 0xff : 0), "svbc");
+    }
+  for (i = 0; i < 8; ++i)
+    {
+      check (svus[i] == i, "svus");
+      check (svss[i] == i - 4, "svss");
+      check (svbs[i] == ((i % 2) ? 0xffff : 0), "svbs");
+      check (svp[i] == i, "svp");
+    }
+  for (i = 0; i < 4; ++i)
+    {
+      check (svui[i] == i, "svui");
+      check (svsi[i] == i - 2, "svsi");
+      check (svbi[i] == ((i % 2) ? 0xffffffff : 0), "svbi");
+      check (svf[i] == i * 1.0f, "svf");
+    }
+}
+
+static void test ()
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned char vuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  vector signed char vsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
+  vector bool char vbc = {255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0};
+  vector unsigned short vus = {7,6,5,4,3,2,1,0};
+  vector signed short vss = {3,2,1,0,-1,-2,-3,-4};
+  vector bool short vbs = {65535,0,65535,0,65535,0,65535,0};
+  vector pixel vp = {7,6,5,4,3,2,1,0};
+  vector unsigned int vui = {3,2,1,0};
+  vector signed int vsi = {1,0,-1,-2};
+  vector bool int vbi = {0xffffffff,0,0xffffffff,0};
+  vector float vf = {3.0,2.0,1.0,0.0};
+#else
+  vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector bool char vbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
+  vector unsigned short vus = {0,1,2,3,4,5,6,7};
+  vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
+  vector bool short vbs = {0,65535,0,65535,0,65535,0,65535};
+  vector pixel vp = {0,1,2,3,4,5,6,7};
+  vector unsigned int vui = {0,1,2,3};
+  vector signed int vsi = {-2,-1,0,1};
+  vector bool int vbi = {0,0xffffffff,0,0xffffffff};
+  vector float vf = {0.0,1.0,2.0,3.0};
+#endif
+
+  vec_st (vuc, 0, (vector unsigned char *)svuc);
+  vec_st (vsc, 0, (vector signed char *)svsc);
+  vec_st (vbc, 0, (vector bool char *)svbc);
+  vec_st (vus, 0, (vector unsigned short *)svus);
+  vec_st (vss, 0, (vector signed short *)svss);
+  vec_st (vbs, 0, (vector bool short *)svbs);
+  vec_st (vp,  0, (vector pixel *)svp);
+  vec_st (vui, 0, (vector unsigned int *)svui);
+  vec_st (vsi, 0, (vector signed int *)svsi);
+  vec_st (vbi, 0, (vector bool int *)svbi);
+  vec_st (vf,  0, (vector float *)svf);
+
+  check_arrays ();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/st-vsx-be-order.c b/gcc/testsuite/gcc.dg/vmx/st-vsx-be-order.c
new file mode 100644
index 0000000..6656a8a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/st-vsx-be-order.c
@@ -0,0 +1,34 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static unsigned long svul[2] __attribute__ ((aligned (16)));
+static double svd[2] __attribute__ ((aligned (16)));
+
+static void check_arrays ()
+{
+  unsigned int i;
+  for (i = 0; i < 2; ++i)
+    {
+      check (svul[i] == i, "svul");
+      check (svd[i] == i * 1.0, "svd");
+    }
+}
+
+static void test ()
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned long vul = {1,0};
+  vector double vd = {1.0,0.0};
+#else
+  vector unsigned long vul = {0,1};
+  vector double vd = {0.0,1.0};
+#endif
+
+  vec_st (vul, 0, (vector unsigned long *)svul);
+  vec_st (vd,  0, (vector double *)svd);
+
+  check_arrays ();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/st-vsx.c b/gcc/testsuite/gcc.dg/vmx/st-vsx.c
new file mode 100644
index 0000000..8505ea9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/st-vsx.c
@@ -0,0 +1,29 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static unsigned long svul[2] __attribute__ ((aligned (16)));
+static double svd[2] __attribute__ ((aligned (16)));
+
+static void check_arrays ()
+{
+  unsigned int i;
+  for (i = 0; i < 2; ++i)
+    {
+      check (svul[i] == i, "svul");
+      check (svd[i] == i * 1.0, "svd");
+    }
+}
+
+static void test ()
+{
+  vector unsigned long vul = {0,1};
+  vector double vd = {0.0,1.0};
+
+  vec_st (vul, 0, (vector unsigned long *)svul);
+  vec_st (vd,  0, (vector double *)svd);
+
+  check_arrays ();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/st.c b/gcc/testsuite/gcc.dg/vmx/st.c
new file mode 100644
index 0000000..3339b72
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/st.c
@@ -0,0 +1,67 @@
+#include "harness.h"
+
+static unsigned char svuc[16] __attribute__ ((aligned (16)));
+static signed char svsc[16] __attribute__ ((aligned (16)));
+static unsigned char svbc[16] __attribute__ ((aligned (16)));
+static unsigned short svus[8] __attribute__ ((aligned (16)));
+static signed short svss[8] __attribute__ ((aligned (16)));
+static unsigned short svbs[8] __attribute__ ((aligned (16)));
+static unsigned short svp[8] __attribute__ ((aligned (16)));
+static unsigned int svui[4] __attribute__ ((aligned (16)));
+static signed int svsi[4] __attribute__ ((aligned (16)));
+static unsigned int svbi[4] __attribute__ ((aligned (16)));
+static float svf[4] __attribute__ ((aligned (16)));
+
+static void check_arrays ()
+{
+  unsigned int i;
+  for (i = 0; i < 16; ++i)
+    {
+      check (svuc[i] == i, "svuc");
+      check (svsc[i] == i - 8, "svsc");
+      check (svbc[i] == ((i % 2) ? 0xff : 0), "svbc");
+    }
+  for (i = 0; i < 8; ++i)
+    {
+      check (svus[i] == i, "svus");
+      check (svss[i] == i - 4, "svss");
+      check (svbs[i] == ((i % 2) ? 0xffff : 0), "svbs");
+      check (svp[i] == i, "svp");
+    }
+  for (i = 0; i < 4; ++i)
+    {
+      check (svui[i] == i, "svui");
+      check (svsi[i] == i - 2, "svsi");
+      check (svbi[i] == ((i % 2) ? 0xffffffff : 0), "svbi");
+      check (svf[i] == i * 1.0f, "svf");
+    }
+}
+
+static void test ()
+{
+  vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector bool char vbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
+  vector unsigned short vus = {0,1,2,3,4,5,6,7};
+  vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
+  vector bool short vbs = {0,65535,0,65535,0,65535,0,65535};
+  vector pixel vp = {0,1,2,3,4,5,6,7};
+  vector unsigned int vui = {0,1,2,3};
+  vector signed int vsi = {-2,-1,0,1};
+  vector bool int vbi = {0,0xffffffff,0,0xffffffff};
+  vector float vf = {0.0,1.0,2.0,3.0};
+
+  vec_st (vuc, 0, (vector unsigned char *)svuc);
+  vec_st (vsc, 0, (vector signed char *)svsc);
+  vec_st (vbc, 0, (vector bool char *)svbc);
+  vec_st (vus, 0, (vector unsigned short *)svus);
+  vec_st (vss, 0, (vector signed short *)svss);
+  vec_st (vbs, 0, (vector bool short *)svbs);
+  vec_st (vp,  0, (vector pixel *)svp);
+  vec_st (vui, 0, (vector unsigned int *)svui);
+  vec_st (vsi, 0, (vector signed int *)svsi);
+  vec_st (vbi, 0, (vector bool int *)svbi);
+  vec_st (vf,  0, (vector float *)svf);
+
+  check_arrays ();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/stl-be-order.c b/gcc/testsuite/gcc.dg/vmx/stl-be-order.c
new file mode 100644
index 0000000..7f00a03
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/stl-be-order.c
@@ -0,0 +1,83 @@
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
+
+#include "harness.h"
+
+static unsigned char svuc[16] __attribute__ ((aligned (16)));
+static signed char svsc[16] __attribute__ ((aligned (16)));
+static unsigned char svbc[16] __attribute__ ((aligned (16)));
+static unsigned short svus[8] __attribute__ ((aligned (16)));
+static signed short svss[8] __attribute__ ((aligned (16)));
+static unsigned short svbs[8] __attribute__ ((aligned (16)));
+static unsigned short svp[8] __attribute__ ((aligned (16)));
+static unsigned int svui[4] __attribute__ ((aligned (16)));
+static signed int svsi[4] __attribute__ ((aligned (16)));
+static unsigned int svbi[4] __attribute__ ((aligned (16)));
+static float svf[4] __attribute__ ((aligned (16)));
+
+static void check_arrays ()
+{
+  unsigned int i;
+  for (i = 0; i < 16; ++i)
+    {
+      check (svuc[i] == i, "svuc");
+      check (svsc[i] == i - 8, "svsc");
+      check (svbc[i] == ((i % 2) ? 0xff : 0), "svbc");
+    }
+  for (i = 0; i < 8; ++i)
+    {
+      check (svus[i] == i, "svus");
+      check (svss[i] == i - 4, "svss");
+      check (svbs[i] == ((i % 2) ? 0xffff : 0), "svbs");
+      check (svp[i] == i, "svp");
+    }
+  for (i = 0; i < 4; ++i)
+    {
+      check (svui[i] == i, "svui");
+      check (svsi[i] == i - 2, "svsi");
+      check (svbi[i] == ((i % 2) ? 0xffffffff : 0), "svbi");
+      check (svf[i] == i * 1.0f, "svf");
+    }
+}
+
+static void test ()
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned char vuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  vector signed char vsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
+  vector bool char vbc = {255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0};
+  vector unsigned short vus = {7,6,5,4,3,2,1,0};
+  vector signed short vss = {3,2,1,0,-1,-2,-3,-4};
+  vector bool short vbs = {65535,0,65535,0,65535,0,65535,0};
+  vector pixel vp = {7,6,5,4,3,2,1,0};
+  vector unsigned int vui = {3,2,1,0};
+  vector signed int vsi = {1,0,-1,-2};
+  vector bool int vbi = {0xffffffff,0,0xffffffff,0};
+  vector float vf = {3.0,2.0,1.0,0.0};
+#else
+  vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector bool char vbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
+  vector unsigned short vus = {0,1,2,3,4,5,6,7};
+  vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
+  vector bool short vbs = {0,65535,0,65535,0,65535,0,65535};
+  vector pixel vp = {0,1,2,3,4,5,6,7};
+  vector unsigned int vui = {0,1,2,3};
+  vector signed int vsi = {-2,-1,0,1};
+  vector bool int vbi = {0,0xffffffff,0,0xffffffff};
+  vector float vf = {0.0,1.0,2.0,3.0};
+#endif
+
+  vec_stl (vuc, 0, (vector unsigned char *)svuc);
+  vec_stl (vsc, 0, (vector signed char *)svsc);
+  vec_stl (vbc, 0, (vector bool char *)svbc);
+  vec_stl (vus, 0, (vector unsigned short *)svus);
+  vec_stl (vss, 0, (vector signed short *)svss);
+  vec_stl (vbs, 0, (vector bool short *)svbs);
+  vec_stl (vp,  0, (vector pixel *)svp);
+  vec_stl (vui, 0, (vector unsigned int *)svui);
+  vec_stl (vsi, 0, (vector signed int *)svsi);
+  vec_stl (vbi, 0, (vector bool int *)svbi);
+  vec_stl (vf,  0, (vector float *)svf);
+
+  check_arrays ();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/stl-vsx-be-order.c b/gcc/testsuite/gcc.dg/vmx/stl-vsx-be-order.c
new file mode 100644
index 0000000..14ac800
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/stl-vsx-be-order.c
@@ -0,0 +1,34 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static unsigned long svul[2] __attribute__ ((aligned (16)));
+static double svd[2] __attribute__ ((aligned (16)));
+
+static void check_arrays ()
+{
+  unsigned int i;
+  for (i = 0; i < 2; ++i)
+    {
+      check (svul[i] == i, "svul");
+      check (svd[i] == i * 1.0, "svd");
+    }
+}
+
+static void test ()
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned long vul = {1,0};
+  vector double vd = {1.0,0.0};
+#else
+  vector unsigned long vul = {0,1};
+  vector double vd = {0.0,1.0};
+#endif
+
+  vec_stl (vul, 0, (vector unsigned long *)svul);
+  vec_stl (vd,  0, (vector double *)svd);
+
+  check_arrays ();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/stl-vsx.c b/gcc/testsuite/gcc.dg/vmx/stl-vsx.c
new file mode 100644
index 0000000..7e7be7b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/stl-vsx.c
@@ -0,0 +1,29 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static unsigned long svul[2] __attribute__ ((aligned (16)));
+static double svd[2] __attribute__ ((aligned (16)));
+
+static void check_arrays ()
+{
+  unsigned int i;
+  for (i = 0; i < 2; ++i)
+    {
+      check (svul[i] == i, "svul");
+      check (svd[i] == i * 1.0, "svd");
+    }
+}
+
+static void test ()
+{
+  vector unsigned long vul = {0,1};
+  vector double vd = {0.0,1.0};
+
+  vec_stl (vul, 0, (vector unsigned long *)svul);
+  vec_stl (vd,  0, (vector double *)svd);
+
+  check_arrays ();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/stl.c b/gcc/testsuite/gcc.dg/vmx/stl.c
new file mode 100644
index 0000000..9ebd878
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/stl.c
@@ -0,0 +1,67 @@
+#include "harness.h"
+
+static unsigned char svuc[16] __attribute__ ((aligned (16)));
+static signed char svsc[16] __attribute__ ((aligned (16)));
+static unsigned char svbc[16] __attribute__ ((aligned (16)));
+static unsigned short svus[8] __attribute__ ((aligned (16)));
+static signed short svss[8] __attribute__ ((aligned (16)));
+static unsigned short svbs[8] __attribute__ ((aligned (16)));
+static unsigned short svp[8] __attribute__ ((aligned (16)));
+static unsigned int svui[4] __attribute__ ((aligned (16)));
+static signed int svsi[4] __attribute__ ((aligned (16)));
+static unsigned int svbi[4] __attribute__ ((aligned (16)));
+static float svf[4] __attribute__ ((aligned (16)));
+
+static void check_arrays ()
+{
+  unsigned int i;
+  for (i = 0; i < 16; ++i)
+    {
+      check (svuc[i] == i, "svuc");
+      check (svsc[i] == i - 8, "svsc");
+      check (svbc[i] == ((i % 2) ? 0xff : 0), "svbc");
+    }
+  for (i = 0; i < 8; ++i)
+    {
+      check (svus[i] == i, "svus");
+      check (svss[i] == i - 4, "svss");
+      check (svbs[i] == ((i % 2) ? 0xffff : 0), "svbs");
+      check (svp[i] == i, "svp");
+    }
+  for (i = 0; i < 4; ++i)
+    {
+      check (svui[i] == i, "svui");
+      check (svsi[i] == i - 2, "svsi");
+      check (svbi[i] == ((i % 2) ? 0xffffffff : 0), "svbi");
+      check (svf[i] == i * 1.0f, "svf");
+    }
+}
+
+static void test ()
+{
+  vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector bool char vbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
+  vector unsigned short vus = {0,1,2,3,4,5,6,7};
+  vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
+  vector bool short vbs = {0,65535,0,65535,0,65535,0,65535};
+  vector pixel vp = {0,1,2,3,4,5,6,7};
+  vector unsigned int vui = {0,1,2,3};
+  vector signed int vsi = {-2,-1,0,1};
+  vector bool int vbi = {0,0xffffffff,0,0xffffffff};
+  vector float vf = {0.0,1.0,2.0,3.0};
+
+  vec_stl (vuc, 0, (vector unsigned char *)svuc);
+  vec_stl (vsc, 0, (vector signed char *)svsc);
+  vec_stl (vbc, 0, (vector bool char *)svbc);
+  vec_stl (vus, 0, (vector unsigned short *)svus);
+  vec_stl (vss, 0, (vector signed short *)svss);
+  vec_stl (vbs, 0, (vector bool short *)svbs);
+  vec_stl (vp,  0, (vector pixel *)svp);
+  vec_stl (vui, 0, (vector unsigned int *)svui);
+  vec_stl (vsi, 0, (vector signed int *)svsi);
+  vec_stl (vbi, 0, (vector bool int *)svbi);
+  vec_stl (vf,  0, (vector float *)svf);
+
+  check_arrays ();
+}