;; Machine description for AArch64 AdvSIMD architecture.
-;; Copyright (C) 2011-2019 Free Software Foundation, Inc.
+;; Copyright (C) 2011-2020 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
[(set_attr "type" "neon_shift_imm<q>")]
)
+;; Shift-right-and-accumulate: combine a right shift by immediate (the
+;; SHIFTRT code iterator covers both the arithmetic and logical forms)
+;; with an addition into a single SSRA/USRA instruction.  Operand 3 is
+;; the accumulator and is tied to the destination (constraint "0");
+;; <sra_op> expands to "s" or "u" according to the shift code.
+(define_insn "*aarch64_simd_sra<mode>"
+ [(set (match_operand:VDQ_I 0 "register_operand" "=w")
+ (plus:VDQ_I
+ (SHIFTRT:VDQ_I
+ (match_operand:VDQ_I 1 "register_operand" "w")
+ (match_operand:VDQ_I 2 "aarch64_simd_rshift_imm" "Dr"))
+ (match_operand:VDQ_I 3 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "<sra_op>sra\t%0.<Vtype>, %1.<Vtype>, %2"
+ [(set_attr "type" "neon_shift_acc<q>")]
+)
+
(define_insn "aarch64_simd_imm_shl<mode>"
[(set (match_operand:VDQ_I 0 "register_operand" "=w")
(ashift:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w")
+;; smov: extract lane %2 of a VDQQH vector and sign-extend it into a GP
+;; register.  Iterator references are now written <VDQQH:...> explicitly
+;; because the pattern uses two mode iterators (GPI and VDQQH), so a
+;; bare <VEL>/<MODE> would be ambiguous.
(define_insn "*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>"
[(set (match_operand:GPI 0 "register_operand" "=r")
(sign_extend:GPI
- (vec_select:<VEL>
+ (vec_select:<VDQQH:VEL>
(match_operand:VDQQH 1 "register_operand" "w")
(parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
"TARGET_SIMD"
{
- operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
+ operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
+ INTVAL (operands[2]));
return "smov\\t%<GPI:w>0, %1.<VDQQH:Vetype>[%2]";
}
- [(set_attr "type" "neon_to_gp<q>")]\r
-)\r
-\r
-(define_insn "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"\r
- [(set (match_operand:GPI 0 "register_operand" "=r")\r
- (zero_extend:GPI\r
- (vec_select:<VEL>\r
- (match_operand:VDQQH 1 "register_operand" "w")\r
- (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]\r
- "TARGET_SIMD"\r
- {\r
- operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,\r
- INTVAL (operands[2]));\r
- return "umov\\t%w0, %1.<Vetype>[%2]";\r
- }\r
- [(set_attr "type" "neon_to_gp<q>")]\r
+ [(set_attr "type" "neon_to_gp<VDQQH:q>")]
+)
+
+;; umov: extract lane %2 of a VDQQH vector and zero-extend it into a GP
+;; register.  %w0 is used for both SI and DI destinations because the
+;; 32-bit umov form implicitly zeroes the upper 32 bits of the X
+;; register.  All iterator references are spelled <VDQQH:...> since the
+;; pattern uses two mode iterators.
+(define_insn "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (zero_extend:GPI
+ (vec_select:<VDQQH:VEL>
+ (match_operand:VDQQH 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_SIMD"
+ {
+ operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
+ INTVAL (operands[2]));
+ return "umov\\t%w0, %1.<VDQQH:Vetype>[%2]";
+ }
+ [(set_attr "type" "neon_to_gp<VDQQH:q>")]
+)
;; Lane extraction of a value, neither sign nor zero extension
[(set_attr "type" "neon_load1_3reg<q>")]
)
+;; Expander for LD1 into four consecutive vector registers: wrap the
+;; pointer in operand 1 in an XImode MEM and emit the
+;; aarch64_ld1_x4_<mode> insn.  The VSTRUCTDUMMY unspec carries only the
+;; element mode (VALLDIF); it generates no code.
+(define_expand "aarch64_ld1x4<VALLDIF:mode>"
+ [(match_operand:XI 0 "register_operand" "=w")
+ (match_operand:DI 1 "register_operand" "r")
+ (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ rtx mem = gen_rtx_MEM (XImode, operands[1]);
+ emit_insn (gen_aarch64_ld1_x4_<VALLDIF:mode> (operands[0], mem));
+ DONE;
+})
+
+;; LD1 (multiple structures), four registers.  %S0 and %V0 print the
+;; first and fourth registers of the XImode four-vector tuple, giving
+;; the {vN.T - vN+3.T} register-list syntax.
+(define_insn "aarch64_ld1_x4_<mode>"
+ [(set (match_operand:XI 0 "register_operand" "=w")
+ (unspec:XI
+ [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv")
+ (unspec:VALLDIF [(const_int 4)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_LD1))]
+ "TARGET_SIMD"
+ "ld1\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
+ [(set_attr "type" "neon_load1_4reg<q>")]
+)
+
(define_expand "aarch64_st1x2<VALLDIF:mode>"
[(match_operand:DI 0 "register_operand")
(match_operand:OI 1 "register_operand")
[(set_attr "type" "neon_store1_3reg<q>")]
)
+;; Expander for ST1 of four consecutive vector registers: mirror of
+;; aarch64_ld1x4 — wrap the pointer in operand 0 in an XImode MEM and
+;; emit the aarch64_st1_x4_<mode> insn.
+(define_expand "aarch64_st1x4<VALLDIF:mode>"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:XI 1 "register_operand" "")
+ (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ rtx mem = gen_rtx_MEM (XImode, operands[0]);
+ emit_insn (gen_aarch64_st1_x4_<VALLDIF:mode> (mem, operands[1]));
+ DONE;
+})
+
+;; ST1 (multiple structures), four registers.  %S1 and %V1 print the
+;; first and fourth registers of the XImode four-vector source tuple.
+(define_insn "aarch64_st1_x4_<mode>"
+ [(set (match_operand:XI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:XI
+ [(match_operand:XI 1 "register_operand" "w")
+ (unspec:VALLDIF [(const_int 4)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST1))]
+ "TARGET_SIMD"
+ "st1\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
+ [(set_attr "type" "neon_store1_4reg<q>")]
+)
+
(define_insn "*aarch64_mov<mode>"
[(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
(match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
;; This instruction's pattern is generated directly by
;; aarch64_expand_vec_perm_const, so any changes to the pattern would
;; need corresponding changes there.
+;; (perm_insn now spells the complete mnemonic, e.g. "zip1"; the
+;; separate perm_hilo iterator attribute has been folded into it.)
-(define_insn "aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>"
+(define_insn "aarch64_<PERMUTE:perm_insn><mode>"
[(set (match_operand:VALL_F16 0 "register_operand" "=w")
(unspec:VALL_F16 [(match_operand:VALL_F16 1 "register_operand" "w")
(match_operand:VALL_F16 2 "register_operand" "w")]
PERMUTE))]
"TARGET_SIMD"
- "<PERMUTE:perm_insn><PERMUTE:perm_hilo>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ "<PERMUTE:perm_insn>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
[(set_attr "type" "neon_permute<q>")]
)
+;; AESE/AESD.  The state^round-key XOR that the instruction performs is
+;; now modelled explicitly with xor:V16QI, so that combine can absorb a
+;; preceding eor directly into this pattern (replacing the old
+;; *_xor_combine variants).  "%0" marks the xor operands as commutative
+;; while tying operand 1 to the output.
(define_insn "aarch64_crypto_aes<aes_op>v16qi"
[(set (match_operand:V16QI 0 "register_operand" "=w")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "%0")
- (match_operand:V16QI 2 "register_operand" "w")]
+ (unspec:V16QI
+ [(xor:V16QI
+ (match_operand:V16QI 1 "register_operand" "%0")
+ (match_operand:V16QI 2 "register_operand" "w"))]
CRYPTO_AES))]
"TARGET_SIMD && TARGET_AES"
"aes<aes_op>\\t%0.16b, %2.16b"
[(set_attr "type" "crypto_aese")]
)
-(define_insn "*aarch64_crypto_aes<aes_op>v16qi_xor_combine"
- [(set (match_operand:V16QI 0 "register_operand" "=w")
- (unspec:V16QI [(xor:V16QI
- (match_operand:V16QI 1 "register_operand" "%0")
- (match_operand:V16QI 2 "register_operand" "w"))
- (match_operand:V16QI 3 "aarch64_simd_imm_zero" "")]
- CRYPTO_AES))]
- "TARGET_SIMD && TARGET_AES"
- "aes<aes_op>\\t%0.16b, %2.16b"
- [(set_attr "type" "crypto_aese")]
-)
-
-(define_insn "*aarch64_crypto_aes<aes_op>v16qi_xor_combine"
- [(set (match_operand:V16QI 0 "register_operand" "=w")
- (unspec:V16QI [(match_operand:V16QI 3 "aarch64_simd_imm_zero" "")
- (xor:V16QI (match_operand:V16QI 1 "register_operand" "%0")
- (match_operand:V16QI 2 "register_operand" "w"))]
- CRYPTO_AES))]
- "TARGET_SIMD && TARGET_AES"
- "aes<aes_op>\\t%0.16b, %2.16b"
- [(set_attr "type" "crypto_aese")]
-)
-
-;; When AES/AESMC fusion is enabled we want the register allocation to
-;; look like:
-;; AESE Vn, _
-;; AESMC Vn, Vn
-;; So prefer to tie operand 1 to operand 0 when fusing.
-
+;; AESMC/AESIMC.  The old two-alternative form that tied %1 to %0 (to
+;; encourage AESE+AESMC fusion via register allocation) is dropped;
+;; fusion pairs are instead matched by the *_fused combine patterns.
(define_insn "aarch64_crypto_aes<aesmc_op>v16qi"
- [(set (match_operand:V16QI 0 "register_operand" "=w,w")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0,w")]
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")]
CRYPTO_AESMC))]
"TARGET_SIMD && TARGET_AES"
"aes<aesmc_op>\\t%0.16b, %1.16b"
- [(set_attr "type" "crypto_aesmc")
- (set_attr_alternative "enabled"
- [(if_then_else (match_test
- "aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)")
- (const_string "yes" )
- (const_string "no"))
- (const_string "yes")])]
+ [(set_attr "type" "crypto_aesmc")]
)
;; When AESE/AESMC fusion is enabled we really want to keep the two together
;; Mash the two together during combine.
(define_insn "*aarch64_crypto_aese_fused"
- [(set (match_operand:V16QI 0 "register_operand" "=&w")
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
(unspec:V16QI
[(unspec:V16QI
- [(match_operand:V16QI 1 "register_operand" "0")
- (match_operand:V16QI 2 "register_operand" "w")] UNSPEC_AESE)
- ] UNSPEC_AESMC))]
+ [(xor:V16QI
+ (match_operand:V16QI 1 "register_operand" "%0")
+ (match_operand:V16QI 2 "register_operand" "w"))]
+ UNSPEC_AESE)]
+ UNSPEC_AESMC))]
"TARGET_SIMD && TARGET_AES
&& aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)"
"aese\\t%0.16b, %2.16b\;aesmc\\t%0.16b, %0.16b"
;; Mash the two together during combine.
(define_insn "*aarch64_crypto_aesd_fused"
- [(set (match_operand:V16QI 0 "register_operand" "=&w")
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
(unspec:V16QI
[(unspec:V16QI
- [(match_operand:V16QI 1 "register_operand" "0")
- (match_operand:V16QI 2 "register_operand" "w")] UNSPEC_AESD)
- ] UNSPEC_AESIMC))]
+ [(xor:V16QI
+ (match_operand:V16QI 1 "register_operand" "%0")
+ (match_operand:V16QI 2 "register_operand" "w"))]
+ UNSPEC_AESD)]
+ UNSPEC_AESIMC))]
"TARGET_SIMD && TARGET_AES
&& aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)"
"aesd\\t%0.16b, %2.16b\;aesimc\\t%0.16b, %0.16b"
"pmull2\\t%0.1q, %1.2d, %2.2d"
[(set_attr "type" "crypto_pmull")]
)
+
+;; Sign- or zero-extend a 64-bit integer vector to a 128-bit vector.
+;; ANY_EXTEND selects sign_extend vs zero_extend; <su> prints "s" or
+;; "u", giving SXTL or UXTL.  The pattern name follows the standard
+;; extend<m><n>2 optab convention so the vectorizer can use it directly.
+(define_insn "<optab><Vnarrowq><mode>2"
+ [(set (match_operand:VQN 0 "register_operand" "=w")
+ (ANY_EXTEND:VQN (match_operand:<VNARROWQ> 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "<su>xtl\t%0.<Vtype>, %1.<Vntype>"
+ [(set_attr "type" "neon_shift_imm_long")]
+)
+
+;; Truncate a 128-bit integer vector to a 64-bit vector.
+;; XTN keeps the low half of each element; the standard trunc<m><n>2
+;; optab name makes this directly available to the middle end.
+(define_insn "trunc<mode><Vnarrowq>2"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "xtn\t%0.<Vntype>, %1.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)