+2018-10-29  Paul A. Clarke  <pc@us.ibm.com>
+
+ * gcc/config/rs6000/mmintrin.h (_mm_packs_pi16, _mm_packs_pi32,
+ _mm_packs_pu16, _mm_unpackhi_pi8, _mm_unpacklo_pi8, _mm_add_pi8,
+ _mm_add_pi16, _mm_add_pi32, _mm_sub_pi8, _mm_sub_pi16, _mm_sub_pi32,
+ _mm_cmpgt_pi8, _mm_cmpeq_pi16, _mm_cmpgt_pi16, _mm_cmpeq_pi32,
+ _mm_cmpgt_pi32, _mm_adds_pi8, _mm_adds_pi16, _mm_adds_pu8,
+ _mm_adds_pu16, _mm_subs_pi8, _mm_subs_pi16, _mm_subs_pu8,
+ _mm_subs_pu16, _mm_madd_pi16, _mm_mulhi_pi16, _mm_mullo_pi16,
+ _mm_sll_pi16, _mm_sra_pi16, _mm_srl_pi16, _mm_set1_pi16, _mm_set1_pi8):
+ Change 'vector' to '__vector'.
+ * gcc/config/rs6000/xmmintrin.h (_mm_cvtps_pi32, _mm_cvttps_pi32,
+ _mm_cvtps_pi16, _mm_cvtps_pi8, _mm_max_pi16, _mm_max_pu8, _mm_min_pi16,
+ _mm_min_pu8, _mm_mulhi_pu16, _mm_shuffle_pi16, _mm_avg_pu8,
+ _mm_avg_pu16): Likewise.  Also fix whitespace.
+
2018-10-29  Richard Biener  <rguenther@suse.de>
PR tree-optimization/87785
vm1 = (__vector signed short) (__vector unsigned long long) { __m2, __m1 };
vresult = vec_vpkshss (vm1, vm1);
- return (__m64) ((vector long long) vresult)[0];
+ return (__m64) ((__vector long long) vresult)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
vm1 = (__vector signed int) (__vector unsigned long long) { __m2, __m1 };
vresult = vec_vpkswss (vm1, vm1);
- return (__m64) ((vector long long) vresult)[0];
+ return (__m64) ((__vector long long) vresult)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
vm1 = (__vector signed short) (__vector unsigned long long) { __m2, __m1 };
vresult = vec_vpkshus (vm1, vm1);
- return (__m64) ((vector long long) vresult)[0];
+ return (__m64) ((__vector long long) vresult)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector unsigned char)vec_splats (__m1);
b = (__vector unsigned char)vec_splats (__m2);
c = vec_mergel (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector unsigned char)vec_splats (__m1);
b = (__vector unsigned char)vec_splats (__m2);
c = vec_mergel (a, b);
- return (__m64) ((vector long long) c)[1];
+ return (__m64) ((__vector long long) c)[1];
#else
__m64_union m1, m2, res;
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = vec_add (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_add (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed int)vec_splats (__m1);
b = (__vector signed int)vec_splats (__m2);
c = vec_add (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = vec_sub (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_sub (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed int)vec_splats (__m1);
b = (__vector signed int)vec_splats (__m2);
c = vec_sub (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = (__vector signed char)vec_cmpgt (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = (__vector signed short)vec_cmpeq (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = (__vector signed short)vec_cmpgt (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed int)vec_splats (__m1);
b = (__vector signed int)vec_splats (__m2);
c = (__vector signed int)vec_cmpeq (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed int)vec_splats (__m1);
b = (__vector signed int)vec_splats (__m2);
c = (__vector signed int)vec_cmpgt (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = vec_adds (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_adds (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector unsigned char)vec_splats (__m1);
b = (__vector unsigned char)vec_splats (__m2);
c = vec_adds (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector unsigned short)vec_splats (__m1);
b = (__vector unsigned short)vec_splats (__m2);
c = vec_adds (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = vec_subs (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_subs (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector unsigned char)vec_splats (__m1);
b = (__vector unsigned char)vec_splats (__m2);
c = vec_subs (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector unsigned short)vec_splats (__m1);
b = (__vector unsigned short)vec_splats (__m2);
c = vec_subs (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_vmsumshm (a, b, zero);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
w1 = vec_vmulosh (a, b);
c = (__vector signed short)vec_perm (w0, w1, xform1);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = a * b;
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
m = (__vector signed short)vec_splats (__m);
c = (__vector unsigned short)vec_splats ((unsigned short)__count);
r = vec_sl (m, (__vector unsigned short)c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
}
else
return (0);
m = (__vector signed short)vec_splats (__m);
c = (__vector unsigned short)vec_splats ((unsigned short)__count);
r = vec_sra (m, (__vector unsigned short)c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
}
else
return (0);
m = (__vector unsigned short)vec_splats (__m);
c = (__vector unsigned short)vec_splats ((unsigned short)__count);
r = vec_sr (m, (__vector unsigned short)c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
}
else
return (0);
__vector signed short w;
w = (__vector signed short)vec_splats (__w);
- return (__m64) ((vector long long) w)[0];
+ return (__m64) ((__vector long long) w)[0];
#else
__m64_union res;
__vector signed char b;
b = (__vector signed char)vec_splats (__b);
- return (__m64) ((vector long long) b)[0];
+ return (__m64) ((__vector long long) b)[0];
#else
__m64_union res;
rounded = vec_rint(temp);
result = (__vector unsigned long long) vec_cts (rounded, 0);
- return (__m64) ((vector long long) result)[0];
+ return (__m64) ((__vector long long) result)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
result = (__vector unsigned long long) vec_cts (temp, 0);
- return (__m64) ((vector long long) result)[0];
+ return (__m64) ((__vector long long) result)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
/* Convert the four signed 32-bit values in A and B to SPFP form. */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
+_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
{
__vector signed int vi4;
__vector float vf4;
/* Convert the four SPFP values in A to four signed 16-bit integers. */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi16(__m128 __A)
+_mm_cvtps_pi16 (__m128 __A)
{
__v4sf rounded;
__vector signed int temp;
temp = vec_cts (rounded, 0);
result = (__vector unsigned long long) vec_pack (temp, temp);
- return (__m64) ((vector long long) result)[0];
+ return (__m64) ((__vector long long) result)[0];
}
/* Convert the four SPFP values in A to four signed 8-bit integers. */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi8(__m128 __A)
+_mm_cvtps_pi8 (__m128 __A)
{
__v4sf rounded;
__vector signed int tmp_i;
tmp_i = vec_cts (rounded, 0);
tmp_s = vec_pack (tmp_i, zero);
res_v = vec_pack (tmp_s, tmp_s);
- return (__m64) ((vector long long) res_v)[0];
+ return (__m64) ((__vector long long) res_v)[0];
}
/* Selects four specific SPFP values from A and B based on MASK. */
b = (__vector signed short)vec_splats (__B);
c = (__vector __bool short)vec_cmpgt (a, b);
r = vec_sel (b, a, c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
#else
__m64_union m1, m2, res;
b = (__vector unsigned char)vec_splats (__B);
c = (__vector __bool char)vec_cmpgt (a, b);
r = vec_sel (b, a, c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
#else
__m64_union m1, m2, res;
long i;
b = (__vector signed short)vec_splats (__B);
c = (__vector __bool short)vec_cmplt (a, b);
r = vec_sel (b, a, c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
#else
__m64_union m1, m2, res;
b = (__vector unsigned char)vec_splats (__B);
c = (__vector __bool char)vec_cmplt (a, b);
r = vec_sel (b, a, c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
#else
__m64_union m1, m2, res;
long i;
w1 = vec_vmulouh (a, b);
c = (__vector unsigned short)vec_perm (w0, w1, xform1);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
p = vec_splats (t.as_m64);
a = vec_splats (__A);
r = vec_perm (a, a, (__vector unsigned char)p);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector unsigned char)vec_splats (__A);
b = (__vector unsigned char)vec_splats (__B);
c = vec_avg (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a = (__vector unsigned short)vec_splats (__A);
b = (__vector unsigned short)vec_splats (__B);
c = vec_avg (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
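
For context (not part of the patch): a minimal sketch of why an installed header
prefers the reserved '__vector' spelling over 'vector'.  It assumes GCC targeting
PowerPC with AltiVec/VSX enabled (e.g. -mcpu=power8); the helper name below is
hypothetical, not from the headers.

/* altivec.h defines 'vector' as an ordinary macro for '__vector' and explicitly
   allows user code to #undef it (e.g. to avoid clashing with an identifier such
   as C++ std::vector).  A header that spells the keyword 'vector' then fails to
   parse, while the implementation-namespace '__vector' keeps working.  */
#include <altivec.h>

#undef vector                 /* user code reclaims the identifier ...        */
typedef int vector;           /* ... and uses it as an ordinary type name.    */

static inline long long
first_element (__vector long long __v)   /* hypothetical helper */
{
  /* Same element-extraction idiom used by the intrinsics in the patch.  */
  return ((__vector long long) __v)[0];
}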