const vull __builtin_altivec_vpextd (vull, vull);
VPEXTD vpextd {}
- const vull __builtin_altivec_vreplace_un_uv2di (vull, unsigned long long, \
- const int<4>);
+ const vuc __builtin_altivec_vreplace_un_uv2di (vull, unsigned long long, \
+ const int<4>);
VREPLACE_UN_UV2DI vreplace_un_v2di {}
- const vui __builtin_altivec_vreplace_un_uv4si (vui, unsigned int, \
+ const vuc __builtin_altivec_vreplace_un_uv4si (vui, unsigned int, \
const int<4>);
VREPLACE_UN_UV4SI vreplace_un_v4si {}
- const vd __builtin_altivec_vreplace_un_v2df (vd, double, const int<4>);
+ const vuc __builtin_altivec_vreplace_un_v2df (vd, double, const int<4>);
VREPLACE_UN_V2DF vreplace_un_v2df {}
- const vsll __builtin_altivec_vreplace_un_v2di (vsll, signed long long, \
- const int<4>);
+ const vuc __builtin_altivec_vreplace_un_v2di (vsll, signed long long, \
+ const int<4>);
VREPLACE_UN_V2DI vreplace_un_v2di {}
- const vf __builtin_altivec_vreplace_un_v4sf (vf, float, const int<4>);
+ const vuc __builtin_altivec_vreplace_un_v4sf (vf, float, const int<4>);
VREPLACE_UN_V4SF vreplace_un_v4sf {}
- const vsi __builtin_altivec_vreplace_un_v4si (vsi, signed int, const int<4>);
+ const vuc __builtin_altivec_vreplace_un_v4si (vsi, signed int, const int<4>);
VREPLACE_UN_V4SI vreplace_un_v4si {}
const vull __builtin_altivec_vreplace_uv2di (vull, unsigned long long, \
VREPLACE_ELT_V2DF
[VEC_REPLACE_UN, vec_replace_unaligned, __builtin_vec_replace_un]
- vui __builtin_vec_replace_un (vui, unsigned int, const int);
+ vuc __builtin_vec_replace_un (vui, unsigned int, const int);
VREPLACE_UN_UV4SI
- vsi __builtin_vec_replace_un (vsi, signed int, const int);
+ vuc __builtin_vec_replace_un (vsi, signed int, const int);
VREPLACE_UN_V4SI
- vull __builtin_vec_replace_un (vull, unsigned long long, const int);
+ vuc __builtin_vec_replace_un (vull, unsigned long long, const int);
VREPLACE_UN_UV2DI
- vsll __builtin_vec_replace_un (vsll, signed long long, const int);
+ vuc __builtin_vec_replace_un (vsll, signed long long, const int);
VREPLACE_UN_V2DI
- vf __builtin_vec_replace_un (vf, float, const int);
+ vuc __builtin_vec_replace_un (vf, float, const int);
VREPLACE_UN_V4SF
- vd __builtin_vec_replace_un (vd, double, const int);
+ vuc __builtin_vec_replace_un (vd, double, const int);
VREPLACE_UN_V2DF
[VEC_REVB, vec_revb, __builtin_vec_revb]
}
[(set_attr "type" "vecsimple")])
-(define_expand "vreplace_un_<mode>"
- [(set (match_operand:REPLACE_ELT 0 "register_operand")
- (unspec:REPLACE_ELT [(match_operand:REPLACE_ELT 1 "register_operand")
- (match_operand:<VS_scalar> 2 "register_operand")
- (match_operand:QI 3 "const_0_to_12_operand")]
- UNSPEC_REPLACE_UN))]
- "TARGET_POWER10"
-{
- /* Immediate value is the byte index Big Endian numbering. */
- emit_insn (gen_vreplace_elt_<mode>_inst (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-[(set_attr "type" "vecsimple")])
-
(define_insn "vreplace_elt_<mode>_inst"
[(set (match_operand:REPLACE_ELT 0 "register_operand" "=v")
(unspec:REPLACE_ELT [(match_operand:REPLACE_ELT 1 "register_operand" "0")
"vins<REPLACE_ELT_char> %0,%2,%3"
[(set_attr "type" "vecsimple")])
+;; vec_replace_unaligned: insert a scalar at an arbitrary byte offset.
+;; Unlike vreplace_elt_<mode>, the result mode is V16QI rather than the
+;; input's REPLACE_ELT mode, because the byte-granular insert need not land
+;; on an element boundary — the result has no meaningful element type, so
+;; the builtin returns vector unsigned char and callers cast as needed.
+;; Operand 3 is the byte index (Big Endian numbering, 0..12) passed
+;; straight through to the vins{w,d} immediate.
+(define_insn "vreplace_un_<mode>"
+  [(set (match_operand:V16QI 0 "register_operand" "=v")
+	(unspec:V16QI [(match_operand:REPLACE_ELT 1 "register_operand" "0")
+		       (match_operand:<VS_scalar> 2 "register_operand" "r")
+		       (match_operand:QI 3 "const_0_to_12_operand" "n")]
+		      UNSPEC_REPLACE_UN))]
+  "TARGET_POWER10"
+  "vins<REPLACE_ELT_char> %0,%2,%3"
+  [(set_attr "type" "vecsimple")])
+
;; VSX_EXTRACT optimizations
;; Optimize double d = (double) vec_extract (vi, <n>)
;; Get the element into the top position and use XVCVSWDP/XVCVUWDP
vector double src_va_double;
double src_a_double;
+ vector unsigned char vresult_uchar;
+
/* Vector replace 32-bit element */
src_a_uint = 345;
src_va_uint = (vector unsigned int) { 0, 1, 2, 3 };
  /* Byte index 3 will overwrite part of elements 2 and 3 */
expected_vresult_uint = (vector unsigned int) { 1, 2, 345*256, 0 };
- vresult_uint = vec_replace_unaligned (src_va_uint, src_a_uint, 3);
+ vresult_uchar = vec_replace_unaligned (src_va_uint, src_a_uint, 3);
+ vresult_uint = (vector unsigned int) vresult_uchar;
if (!vec_all_eq (vresult_uint, expected_vresult_uint)) {
#if DEBUG
  /* Byte index 7 will overwrite part of elements 1 and 2 */
expected_vresult_int = (vector int) { 1, 234*256, 0, 4 };
- vresult_int = vec_replace_unaligned (src_va_int, src_a_int, 7);
+ vresult_uchar = vec_replace_unaligned (src_va_int, src_a_int, 7);
+ vresult_int = (vector signed int) vresult_uchar;
if (!vec_all_eq (vresult_int, expected_vresult_int)) {
#if DEBUG
vresult_float = (vector float) { 0.0, 0.0, 0.0, 0.0 };
expected_vresult_float = (vector float) { 0.0, 34.0, 20.0, 30.0 };
- vresult_float = vec_replace_unaligned (src_va_float, src_a_float, 8);
+ vresult_uchar = vec_replace_unaligned (src_va_float, src_a_float, 8);
+ vresult_float = (vector float) vresult_uchar;
if (!vec_all_eq (vresult_float, expected_vresult_float)) {
#if DEBUG
0x200 };
  /* Byte index 7 will overwrite least significant byte of element 0 */
- vresult_ullint = vec_replace_unaligned (src_va_ullint, src_a_ullint, 7);
+ vresult_uchar = vec_replace_unaligned (src_va_ullint, src_a_ullint, 7);
+ vresult_ullint = (vector unsigned long long) vresult_uchar;
if (!vec_all_eq (vresult_ullint, expected_vresult_ullint)) {
#if DEBUG
  /* Byte index 7 will overwrite least significant byte of element 0 */
expected_vresult_llint = (vector long long int) { 678*256, 0x100 };
- vresult_llint = vec_replace_unaligned (src_va_llint, src_a_llint, 7);
+ vresult_uchar = vec_replace_unaligned (src_va_llint, src_a_llint, 7);
+ vresult_llint = (vector signed long long) vresult_uchar;
if (!vec_all_eq (vresult_llint, expected_vresult_llint)) {
#if DEBUG
vresult_double = (vector double) { 0.0, 0.0 };
expected_vresult_double = (vector double) { 0.0, 678.0 };
- vresult_double = vec_replace_unaligned (src_va_double, src_a_double, 0);
+ vresult_uchar = vec_replace_unaligned (src_va_double, src_a_double, 0);
+ vresult_double = (vector double) vresult_uchar;
if (!vec_all_eq (vresult_double, expected_vresult_double)) {
#if DEBUG