;; Narrowing operations.
;; For doubles.
-(define_insn "aarch64_simd_vec_pack_trunc_<mode>"
- [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
- (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "xtn\\t%0.<Vntype>, %1.<Vtype>"
+
+;; XTN: truncate each element of a quad (VQN) vector to half its
+;; width, producing the narrowed <VNARROWQ> result.  This is the old
+;; aarch64_simd_vec_pack_trunc_<mode> pattern renamed so it can be
+;; referenced directly as gen_aarch64_xtn<mode> (see the updated
+;; callers below).
+(define_insn "aarch64_xtn<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "xtn\\t%0.<Vntype>, %1.<Vtype>"
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
+;; XTN2, little-endian form: narrow the elements of operand 2 into
+;; one half of the destination while operand 1 — tied to the output
+;; register via the "0" constraint — supplies the other half.  On
+;; little-endian the vec_concat lists the retained half first.
+(define_insn "aarch64_xtn2<mode>_le"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (match_operand:<VNARROWQ> 1 "register_operand" "0")
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "xtn2\t%0.<V2ntype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+;; XTN2, big-endian form: emits the same instruction as the _le
+;; variant above; only the vec_concat operand order in the RTL is
+;; swapped to model big-endian lane numbering.
+(define_insn "aarch64_xtn2<mode>_be"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))
+ (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
+ "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "xtn2\t%0.<V2ntype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+;; Expander for aarch64_xtn2<mode>: selects the _le or _be insn
+;; pattern above according to the target's endianness, then stops
+;; (DONE) so the template operands are never matched directly.
+(define_expand "aarch64_xtn2<mode>"
+ [(match_operand:<VNARROWQ2> 0 "register_operand")
+ (match_operand:<VNARROWQ> 1 "register_operand")
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand"))]
+ "TARGET_SIMD"
+ {
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_aarch64_xtn2<mode>_be (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_aarch64_xtn2<mode>_le (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+)
+
(define_expand "vec_pack_trunc_<mode>"
[(match_operand:<VNARROWD> 0 "register_operand")
(match_operand:VDN 1 "register_operand")
emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[lo]));
emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[hi]));
- emit_insn (gen_aarch64_simd_vec_pack_trunc_<Vdbl> (operands[0], tempreg));
+ /* aarch64_xtn is the renamed aarch64_simd_vec_pack_trunc pattern;
+ behavior is unchanged.  */
+ emit_insn (gen_aarch64_xtn<Vdbl> (operands[0], tempreg));
DONE;
})
;; For quads.
-(define_insn "vec_pack_trunc_<mode>"
- [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=&w")
+;; Previously a single define_insn emitting a two-instruction
+;; "xtn; xtn2" sequence (which needed the earlyclobber "=&w"
+;; constraint).  Now a define_expand that emits the separate
+;; aarch64_xtn and aarch64_xtn2 patterns, making both instructions
+;; visible as individual insns.
+(define_expand "vec_pack_trunc_<mode>"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand")
(vec_concat:<VNARROWQ2>
- (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
- (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand"))
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand"))))]
"TARGET_SIMD"
{
+ /* operands[lo] feeds the plain XTN; operands[hi] feeds XTN2.  The
+ roles swap on big-endian, matching the swapped vec_concat order
+ in the _be pattern.  */
+ rtx tmpreg = gen_reg_rtx (<VNARROWQ>mode);
+ int lo = BYTES_BIG_ENDIAN ? 2 : 1;
+ int hi = BYTES_BIG_ENDIAN ? 1 : 2;
+
+ emit_insn (gen_aarch64_xtn<mode> (tmpreg, operands[lo]));
+
if (BYTES_BIG_ENDIAN)
- return "xtn\\t%0.<Vntype>, %2.<Vtype>\;xtn2\\t%0.<V2ntype>, %1.<Vtype>";
+ emit_insn (gen_aarch64_xtn2<mode>_be (operands[0], tmpreg, operands[hi]));
else
- return "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>";
+ emit_insn (gen_aarch64_xtn2<mode>_le (operands[0], tmpreg, operands[hi]));
+ DONE;
}
- [(set_attr "type" "multiple")
- (set_attr "length" "8")]
)
;; Widening operations.
""
)
-(define_expand "aarch64_xtn<mode>"
- [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
- (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
- "TARGET_SIMD"
- ""
-)
-
;; Truncate a 128-bit integer vector to a 64-bit vector.
(define_insn "trunc<mode><Vnarrowq>2"
[(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
-(define_insn "aarch64_xtn2<mode>_le"
- [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
- (vec_concat:<VNARROWQ2>
- (match_operand:<VNARROWQ> 1 "register_operand" "0")
- (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
- "TARGET_SIMD && !BYTES_BIG_ENDIAN"
- "xtn2\t%0.<V2ntype>, %2.<Vtype>"
- [(set_attr "type" "neon_shift_imm_narrow_q")]
-)
-
-(define_insn "aarch64_xtn2<mode>_be"
- [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
- (vec_concat:<VNARROWQ2>
- (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))
- (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN"
- "xtn2\t%0.<V2ntype>, %2.<Vtype>"
- [(set_attr "type" "neon_shift_imm_narrow_q")]
-)
-
-(define_expand "aarch64_xtn2<mode>"
- [(match_operand:<VNARROWQ2> 0 "register_operand")
- (match_operand:<VNARROWQ> 1 "register_operand")
- (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand"))]
- "TARGET_SIMD"
- {
- if (BYTES_BIG_ENDIAN)
- emit_insn (gen_aarch64_xtn2<mode>_be (operands[0], operands[1],
- operands[2]));
- else
- emit_insn (gen_aarch64_xtn2<mode>_le (operands[0], operands[1],
- operands[2]));
- DONE;
- }
-)
-
(define_insn "aarch64_bfdot<mode>"
[(set (match_operand:VDQSF 0 "register_operand" "=w")
(plus:VDQSF