BUILTIN_VQN (SHIFTIMM, shrn_n, 0, NONE)
BUILTIN_VQN (USHIFTIMM, shrn_n, 0, NONE)
- BUILTIN_VQN (SHIFT2IMM, shrn2_n, 0, NONE)
- BUILTIN_VQN (USHIFT2IMM, shrn2_n, 0, NONE)
+ BUILTIN_VQN (SHIFT2IMM, ushrn2_n, 0, NONE)
+ BUILTIN_VQN (USHIFT2IMM, ushrn2_n, 0, NONE)
BUILTIN_VQN (SHIFTIMM, rshrn_n, 0, NONE)
BUILTIN_VQN (USHIFTIMM, rshrn_n, 0, NONE)
BUILTIN_SD_HSDI (USHIFTIMM, uqrshrn_n, 0, NONE)
BUILTIN_VQN (SHIFT2IMM_UUSS, sqshrun2_n, 0, NONE)
BUILTIN_VQN (SHIFT2IMM_UUSS, sqrshrun2_n, 0, NONE)
- BUILTIN_VQN (SHIFT2IMM, sqshrn2_n, 0, NONE)
- BUILTIN_VQN (USHIFT2IMM, uqshrn2_n, 0, NONE)
+ BUILTIN_VQN (SHIFT2IMM, sqsshrn2_n, 0, NONE)
+ BUILTIN_VQN (USHIFT2IMM, uqushrn2_n, 0, NONE)
BUILTIN_VQN (SHIFT2IMM, sqrshrn2_n, 0, NONE)
BUILTIN_VQN (USHIFT2IMM, uqrshrn2_n, 0, NONE)
/* Implemented by aarch64_<sur>s<lr>i_n<mode>. */
}
)
-(define_insn "aarch64_<shrn_op>shrn2_n<mode>_insn_le"
+(define_insn "aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_le"
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
(vec_concat:<VNARROWQ2>
(match_operand:<VNARROWQ> 1 "register_operand" "0")
(ALL_TRUNC:<VNARROWQ>
- (<TRUNC_SHIFT>:VQN
+ (SHIFTRT:VQN
(match_operand:VQN 2 "register_operand" "w")
(match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
- "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN
+ && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
"<shrn_op>shrn2\t%<vn2>0.<V2ntype>, %<v>2.<Vtype>, %3"
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
-(define_insn "aarch64_<shrn_op>shrn2_n<mode>_insn_be"
+(define_insn "aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_be"
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
(vec_concat:<VNARROWQ2>
(ALL_TRUNC:<VNARROWQ>
- (<TRUNC_SHIFT>:VQN
+ (SHIFTRT:VQN
(match_operand:VQN 2 "register_operand" "w")
(match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))
(match_operand:<VNARROWQ> 1 "register_operand" "0")))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "TARGET_SIMD && BYTES_BIG_ENDIAN
+ && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
"<shrn_op>shrn2\t%<vn2>0.<V2ntype>, %<v>2.<Vtype>, %3"
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
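Both the _le and _be patterns are gated on AARCH64_VALID_SHRN_OP so that only truncate/shift pairings that correspond to a real narrowing-shift instruction can match. The macro itself is defined in config/aarch64/aarch64.h and is not part of this hunk; a sketch consistent with how it is used above (TRUNCATE, SS_TRUNCATE, US_TRUNCATE, ASHIFTRT and LSHIFTRT are the standard RTL codes) would be:

/* Sketch only: plain truncation tolerates either shift direction,
   while the saturating truncations must pair with the matching
   shift sign.  See config/aarch64/aarch64.h for the real definition.  */
#define AARCH64_VALID_SHRN_OP(T, S)				\
  ((T) == TRUNCATE						\
   || ((T) == US_TRUNCATE && (S) == LSHIFTRT)			\
   || ((T) == SS_TRUNCATE && (S) == ASHIFTRT))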
-(define_expand "aarch64_<shrn_op>shrn2_n<mode>"
+(define_expand "aarch64_<shrn_op><sra_op>shrn2_n<mode>"
[(match_operand:<VNARROWQ2> 0 "register_operand")
(match_operand:<VNARROWQ> 1 "register_operand")
(ALL_TRUNC:<VNARROWQ>
- (match_operand:VQN 2 "register_operand"))
+ (SHIFTRT:VQN (match_operand:VQN 2 "register_operand")))
(match_operand:SI 3 "aarch64_simd_shift_imm_offset_<vn_mode>")]
- "TARGET_SIMD"
+ "TARGET_SIMD && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
{
operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
INTVAL (operands[3]));
if (BYTES_BIG_ENDIAN)
- emit_insn (gen_aarch64_<shrn_op>shrn2_n<mode>_insn_be (operands[0],
- operands[1], operands[2], operands[3]));
+ emit_insn (gen_aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_be (
+ operands[0], operands[1], operands[2], operands[3]));
else
- emit_insn (gen_aarch64_<shrn_op>shrn2_n<mode>_insn_le (operands[0],
- operands[1], operands[2], operands[3]));
+ emit_insn (gen_aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_le (
+ operands[0], operands[1], operands[2], operands[3]));
DONE;
}
)
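For reference, the renamed expanders are what the arm_neon.h intrinsics below now call into. A minimal usage snippet (function names here are only for illustration; the expected mnemonics follow from the output templates above):

#include <arm_neon.h>

/* Expected to expand through aarch64_sqsshrn2_nv8hi and emit
   sqshrn2, e.g. sqshrn2 v0.16b, v1.8h, #3.  */
int8x16_t
narrow_sat_high (int8x8_t lo, int16x8_t wide)
{
  return vqshrn_high_n_s16 (lo, wide, 3);
}

/* Expected to expand through aarch64_uqushrn2_nv8hi and emit
   uqshrn2, e.g. uqshrn2 v0.16b, v1.8h, #3.  */
uint8x16_t
narrow_sat_high_u (uint8x8_t lo, uint16x8_t wide)
{
  return vqshrn_high_n_u16 (lo, wide, 3);
}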
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
{
- return __builtin_aarch64_sqshrn2_nv8hi (__a, __b, __c);
+ return __builtin_aarch64_sqsshrn2_nv8hi (__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
{
- return __builtin_aarch64_sqshrn2_nv4si (__a, __b, __c);
+ return __builtin_aarch64_sqsshrn2_nv4si (__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
{
- return __builtin_aarch64_sqshrn2_nv2di (__a, __b, __c);
+ return __builtin_aarch64_sqsshrn2_nv2di (__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
{
- return __builtin_aarch64_uqshrn2_nv8hi_uuus (__a, __b, __c);
+ return __builtin_aarch64_uqushrn2_nv8hi_uuus (__a, __b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
{
- return __builtin_aarch64_uqshrn2_nv4si_uuus (__a, __b, __c);
+ return __builtin_aarch64_uqushrn2_nv4si_uuus (__a, __b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
{
- return __builtin_aarch64_uqshrn2_nv2di_uuus (__a, __b, __c);
+ return __builtin_aarch64_uqushrn2_nv2di_uuus (__a, __b, __c);
}
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
{
- return __builtin_aarch64_shrn2_nv8hi (__a, __b, __c);
+ return __builtin_aarch64_ushrn2_nv8hi (__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
{
- return __builtin_aarch64_shrn2_nv4si (__a, __b, __c);
+ return __builtin_aarch64_ushrn2_nv4si (__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
{
- return __builtin_aarch64_shrn2_nv2di (__a, __b, __c);
+ return __builtin_aarch64_ushrn2_nv2di (__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
{
- return __builtin_aarch64_shrn2_nv8hi_uuus (__a, __b, __c);
+ return __builtin_aarch64_ushrn2_nv8hi_uuus (__a, __b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
{
- return __builtin_aarch64_shrn2_nv4si_uuus (__a, __b, __c);
+ return __builtin_aarch64_ushrn2_nv4si_uuus (__a, __b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
{
- return __builtin_aarch64_shrn2_nv2di_uuus (__a, __b, __c);
+ return __builtin_aarch64_ushrn2_nv2di_uuus (__a, __b, __c);
}
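Note that the signed vshrn_high_n_* intrinsics above now route through the ushrn2_n builtins (the lshiftrt form) just like the unsigned ones. That is safe because, for the shift counts SHRN2 accepts (1 up to the narrow element width), the narrowed bits do not depend on whether the wide shift was arithmetic or logical. A scalar C illustration of the int16 -> int8 case (hypothetical helper; GCC's arithmetic behaviour for signed right shift assumed):

#include <assert.h>
#include <stdint.h>

/* For n in 1..8, bits n..n+7 of a 16-bit value are unaffected by the
   bits an arithmetic or logical shift brings in from the top, so the
   truncated (narrowed) result is identical either way.  */
static void
check_shrn_shift_equivalence (int16_t x)
{
  for (int n = 1; n <= 8; n++)
    {
      uint8_t via_arithmetic = (uint8_t) (x >> n);
      uint8_t via_logical = (uint8_t) ((uint16_t) x >> n);
      assert (via_arithmetic == via_logical);
    }
}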
__extension__ extern __inline poly8x8_t