void aarch64_expand_sve_vec_cmp_int (rtx, rtx_code, rtx, rtx);
bool aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool);
-void aarch64_expand_sve_vcond (machine_mode, machine_mode, rtx *);
bool aarch64_prepare_sve_int_fma (rtx *, rtx_code);
bool aarch64_prepare_sve_cond_int_fma (rtx *, rtx_code);
gcc_unreachable ();
}
+  rtx mask = gen_reg_rtx (V2DImode);
  cmp_fmt = gen_rtx_fmt_ee (cmp_operator, V2DImode, operands[1], operands[2]);
-  emit_insn (gen_vcondv2div2di (operands[0], operands[1],
-                                operands[2], cmp_fmt, operands[1], operands[2]));
+  emit_insn (gen_vec_cmpv2div2di (mask, cmp_fmt, operands[1], operands[2]));
+  emit_insn (gen_vcond_mask_v2div2di (operands[0], operands[1],
+                                      operands[2], mask));
  DONE;
})
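
The change above splits the old single-step vcond expansion into an explicit compare that materializes a mask, followed by a mask-driven select.  Element-wise, the two-step sequence behaves as in this C++ model (illustrative only, not part of the patch: two 64-bit lanes, an all-ones/all-zeros mask, and GT standing in for the variable cmp_operator):

  #include <cstdint>

  // Step 1 models gen_vec_cmpv2div2di: build an all-ones/all-zeros lane mask.
  // Step 2 models gen_vcond_mask_v2div2di: per-lane select on that mask.
  void
  v2di_cmp_then_select (int64_t dest[2], const int64_t a[2], const int64_t b[2])
  {
    int64_t mask[2];
    for (int i = 0; i < 2; i++)
      mask[i] = (a[i] > b[i]) ? -1 : 0;   /* vec_cmp: all-ones or zero.  */
    for (int i = 0; i < 2; i++)
      dest[i] = mask[i] ? a[i] : b[i];    /* vcond_mask: lane-wise select.  */
  }
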
  DONE;
})
-(define_expand "vcond<mode><mode>"
- [(set (match_operand:VALLDI 0 "register_operand")
- (if_then_else:VALLDI
- (match_operator 3 "comparison_operator"
- [(match_operand:VALLDI 4 "register_operand")
- (match_operand:VALLDI 5 "nonmemory_operand")])
- (match_operand:VALLDI 1 "nonmemory_operand")
- (match_operand:VALLDI 2 "nonmemory_operand")))]
- "TARGET_SIMD"
-{
- rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
- enum rtx_code code = GET_CODE (operands[3]);
-
- /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
- it as well as switch operands 1/2 in order to avoid the additional
- NOT instruction. */
- if (code == NE)
- {
- operands[3] = gen_rtx_fmt_ee (EQ, GET_MODE (operands[3]),
- operands[4], operands[5]);
- std::swap (operands[1], operands[2]);
- }
- emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3],
- operands[4], operands[5]));
- emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
- operands[2], mask));
-
- DONE;
-})
-
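
The NE handling in the expander above (and in its three siblings below) relies on the identity (a != b ? t : f) == (a == b ? f : t): rewriting the comparison as EQ and swapping the select arms yields the same value while avoiding a NOT on the mask.  A self-checking C++ sketch of the identity (illustrative only):

  #include <cassert>

  int
  main ()
  {
    long a = 1, b = 2, t = 10, f = 20;
    long ne_form = (a != b) ? t : f;   /* Original NE form.  */
    long eq_form = (a == b) ? f : t;   /* EQ with swapped arms.  */
    assert (ne_form == eq_form);       /* Holds for any a, b, t, f.  */
    return 0;
  }
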
-(define_expand "vcond<v_cmp_mixed><mode>"
- [(set (match_operand:<V_cmp_mixed> 0 "register_operand")
- (if_then_else:<V_cmp_mixed>
- (match_operator 3 "comparison_operator"
- [(match_operand:VDQF_COND 4 "register_operand")
- (match_operand:VDQF_COND 5 "nonmemory_operand")])
- (match_operand:<V_cmp_mixed> 1 "nonmemory_operand")
- (match_operand:<V_cmp_mixed> 2 "nonmemory_operand")))]
- "TARGET_SIMD"
-{
- rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
- enum rtx_code code = GET_CODE (operands[3]);
-
- /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
- it as well as switch operands 1/2 in order to avoid the additional
- NOT instruction. */
- if (code == NE)
- {
- operands[3] = gen_rtx_fmt_ee (EQ, GET_MODE (operands[3]),
- operands[4], operands[5]);
- std::swap (operands[1], operands[2]);
- }
- emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3],
- operands[4], operands[5]));
- emit_insn (gen_vcond_mask_<v_cmp_mixed><v_int_equiv> (
- operands[0], operands[1],
- operands[2], mask));
-
- DONE;
-})
-
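
The <V_cmp_mixed> variants pair a comparison in one mode with a select on the equal-width sibling mode, for example a V4SF comparison steering a V4SI select (or vice versa).  A plain C++ model of that shape, with made-up names and LT standing in for the operator (illustrative only):

  #include <cstdint>

  // Float compare drives an integer select of the same lane width,
  // as in the removed vcond<v_cmp_mixed><mode> pattern.
  void
  select_v4si_on_v4sf_cmp (int32_t dest[4], const float a[4], const float b[4],
                           const int32_t t[4], const int32_t f[4])
  {
    for (int i = 0; i < 4; i++)
      dest[i] = (a[i] < b[i]) ? t[i] : f[i];
  }
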
-(define_expand "vcondu<mode><mode>"
- [(set (match_operand:VSDQ_I_DI 0 "register_operand")
- (if_then_else:VSDQ_I_DI
- (match_operator 3 "comparison_operator"
- [(match_operand:VSDQ_I_DI 4 "register_operand")
- (match_operand:VSDQ_I_DI 5 "nonmemory_operand")])
- (match_operand:VSDQ_I_DI 1 "nonmemory_operand")
- (match_operand:VSDQ_I_DI 2 "nonmemory_operand")))]
- "TARGET_SIMD"
-{
- rtx mask = gen_reg_rtx (<MODE>mode);
- enum rtx_code code = GET_CODE (operands[3]);
-
- /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
- it as well as switch operands 1/2 in order to avoid the additional
- NOT instruction. */
- if (code == NE)
- {
- operands[3] = gen_rtx_fmt_ee (EQ, GET_MODE (operands[3]),
- operands[4], operands[5]);
- std::swap (operands[1], operands[2]);
- }
- emit_insn (gen_vec_cmp<mode><mode> (mask, operands[3],
- operands[4], operands[5]));
- emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
- operands[2], mask));
- DONE;
-})
-
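
vcondu is a separate expander because unsigned lane comparisons (LTU, GEU, ...) order values differently from their signed counterparts; this is also why the mask above can simply use <MODE>mode, since the comparison and the data share one integer mode.  A minimal C++ illustration of the signed/unsigned disagreement:

  #include <cassert>
  #include <cstdint>

  int
  main ()
  {
    uint32_t a = 0xffffffffu, b = 1;
    assert ((int32_t) a < (int32_t) b);   /* Signed view: -1 < 1.  */
    assert (!(a < b));                    /* Unsigned view: 2^32-1 > 1.  */
    return 0;
  }
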
-(define_expand "vcondu<mode><v_cmp_mixed>"
- [(set (match_operand:VDQF 0 "register_operand")
- (if_then_else:VDQF
- (match_operator 3 "comparison_operator"
- [(match_operand:<V_cmp_mixed> 4 "register_operand")
- (match_operand:<V_cmp_mixed> 5 "nonmemory_operand")])
- (match_operand:VDQF 1 "nonmemory_operand")
- (match_operand:VDQF 2 "nonmemory_operand")))]
- "TARGET_SIMD"
-{
- rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
- enum rtx_code code = GET_CODE (operands[3]);
-
- /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
- it as well as switch operands 1/2 in order to avoid the additional
- NOT instruction. */
- if (code == NE)
- {
- operands[3] = gen_rtx_fmt_ee (EQ, GET_MODE (operands[3]),
- operands[4], operands[5]);
- std::swap (operands[1], operands[2]);
- }
- emit_insn (gen_vec_cmp<v_cmp_mixed><v_cmp_mixed> (
- mask, operands[3],
- operands[4], operands[5]));
- emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
- operands[2], mask));
- DONE;
-})
-
;; Patterns for AArch64 SIMD Intrinsics.
;; Lane extraction with sign extension to general purpose register.
;;
;; == Comparisons and selects
;; ---- [INT,FP] Select based on predicates
-;; ---- [INT,FP] Compare and select
;; ---- [INT] Comparisons
;; ---- [INT] While tests
;; ---- [FP] Direct comparisons
}
)
-;; -------------------------------------------------------------------------
-;; ---- [INT,FP] Compare and select
-;; -------------------------------------------------------------------------
-;; The patterns in this section are synthetic.
-;; -------------------------------------------------------------------------
-
-;; Integer (signed) vcond. Don't enforce an immediate range here, since it
-;; depends on the comparison; leave it to aarch64_expand_sve_vcond instead.
-(define_expand "vcond<SVE_ALL:mode><SVE_I:mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (if_then_else:SVE_ALL
- (match_operator 3 "comparison_operator"
- [(match_operand:SVE_I 4 "register_operand")
- (match_operand:SVE_I 5 "nonmemory_operand")])
- (match_operand:SVE_ALL 1 "nonmemory_operand")
- (match_operand:SVE_ALL 2 "nonmemory_operand")))]
- "TARGET_SVE && <SVE_ALL:container_bits> == <SVE_I:container_bits>"
- {
- aarch64_expand_sve_vcond (<SVE_ALL:MODE>mode, <SVE_I:MODE>mode, operands);
- DONE;
- }
-)
-
-;; Integer vcondu. Don't enforce an immediate range here, since it
-;; depends on the comparison; leave it to aarch64_expand_sve_vcond instead.
-(define_expand "vcondu<SVE_ALL:mode><SVE_I:mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (if_then_else:SVE_ALL
- (match_operator 3 "comparison_operator"
- [(match_operand:SVE_I 4 "register_operand")
- (match_operand:SVE_I 5 "nonmemory_operand")])
- (match_operand:SVE_ALL 1 "nonmemory_operand")
- (match_operand:SVE_ALL 2 "nonmemory_operand")))]
- "TARGET_SVE && <SVE_ALL:container_bits> == <SVE_I:container_bits>"
- {
- aarch64_expand_sve_vcond (<SVE_ALL:MODE>mode, <SVE_I:MODE>mode, operands);
- DONE;
- }
-)
-
-;; Floating-point vcond. All comparisons except FCMUO allow a zero operand;
-;; aarch64_expand_sve_vcond handles the case of an FCMUO with zero.
-(define_expand "vcond<mode><v_fp_equiv>"
- [(set (match_operand:SVE_FULL_HSD 0 "register_operand")
- (if_then_else:SVE_FULL_HSD
- (match_operator 3 "comparison_operator"
- [(match_operand:<V_FP_EQUIV> 4 "register_operand")
- (match_operand:<V_FP_EQUIV> 5 "aarch64_simd_reg_or_zero")])
- (match_operand:SVE_FULL_HSD 1 "nonmemory_operand")
- (match_operand:SVE_FULL_HSD 2 "nonmemory_operand")))]
- "TARGET_SVE"
- {
- aarch64_expand_sve_vcond (<MODE>mode, <V_FP_EQUIV>mode, operands);
- DONE;
- }
-)
-
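
These three expanders were the only entry points into aarch64_expand_sve_vcond, which is removed below; the remaining vec_cmp and vcond_mask patterns express the same compare-and-select combinations in two steps.
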
;; -------------------------------------------------------------------------
;; ---- [INT] Comparisons
;; -------------------------------------------------------------------------
  return false;
}
-/* Expand an SVE vcond pattern with operands OPS.  DATA_MODE is the mode
-   of the data being selected and CMP_MODE is the mode of the values being
-   compared.  */
-
-void
-aarch64_expand_sve_vcond (machine_mode data_mode, machine_mode cmp_mode,
-                          rtx *ops)
-{
-  machine_mode pred_mode = aarch64_get_mask_mode (cmp_mode).require ();
-  rtx pred = gen_reg_rtx (pred_mode);
-  if (FLOAT_MODE_P (cmp_mode))
-    {
-      if (aarch64_expand_sve_vec_cmp_float (pred, GET_CODE (ops[3]),
-                                            ops[4], ops[5], true))
-        std::swap (ops[1], ops[2]);
-    }
-  else
-    aarch64_expand_sve_vec_cmp_int (pred, GET_CODE (ops[3]), ops[4], ops[5]);
-
-  if (!aarch64_sve_reg_or_dup_imm (ops[1], data_mode))
-    ops[1] = force_reg (data_mode, ops[1]);
-  /* The "false" value can only be zero if the "true" value is a constant.  */
-  if (register_operand (ops[1], data_mode)
-      || !aarch64_simd_reg_or_zero (ops[2], data_mode))
-    ops[2] = force_reg (data_mode, ops[2]);
-
-  rtvec vec = gen_rtvec (3, pred, ops[1], ops[2]);
-  emit_set_insn (ops[0], gen_rtx_UNSPEC (data_mode, vec, UNSPEC_SEL));
-}
-
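
For reference, the UNSPEC_SEL that this function emitted corresponds to a per-lane predicated select (the SVE SEL operation), with the data operands swapped whenever aarch64_expand_sve_vec_cmp_float reported that it had emitted the inverse comparison.  A C++ model of that final select (illustrative only, not GCC API):

  #include <cstdint>
  #include <utility>

  // dest[i] = pred[i] ? t[i] : f[i], with "inverted" standing in for the
  // true return value of aarch64_expand_sve_vec_cmp_float above.
  void
  sve_sel_model (int64_t *dest, const bool *pred, const int64_t *t,
                 const int64_t *f, unsigned nunits, bool inverted)
  {
    if (inverted)
      std::swap (t, f);   /* Mirrors std::swap (ops[1], ops[2]).  */
    for (unsigned i = 0; i < nunits; i++)
      dest[i] = pred[i] ? t[i] : f[i];
  }
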
/* Return true if:
(a) MODE1 and MODE2 use the same layout for bytes that are common
(define_mode_attr vq_int_equiv [(DF "v2di") (SF "v4si")
])
-;; Floating-point equivalent of selected modes.
-(define_mode_attr V_FP_EQUIV [(VNx8HI "VNx8HF") (VNx8HF "VNx8HF")
-                              (VNx8BF "VNx8HF")
-                              (VNx4SI "VNx4SF") (VNx4SF "VNx4SF")
-                              (VNx2DI "VNx2DF") (VNx2DF "VNx2DF")])
-(define_mode_attr v_fp_equiv [(VNx8HI "vnx8hf") (VNx8HF "vnx8hf")
-                              (VNx8BF "vnx8hf")
-                              (VNx4SI "vnx4sf") (VNx4SF "vnx4sf")
-                              (VNx2DI "vnx2df") (VNx2DF "vnx2df")])
-
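
V_FP_EQUIV and v_fp_equiv existed to pair each of these modes with the same-width floating-point vector mode (e.g. VNx4SI with VNx4SF) when instantiating the vcond<mode><v_fp_equiv> pattern removed above; with that pattern gone they appear to have no remaining uses.
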
;; Maps full and partial vector modes of any element type to a full-vector
;; integer mode with the same number of units.
(define_mode_attr V_INT_CONTAINER [(VNx16QI "VNx16QI") (VNx8QI "VNx8HI")
(VNx4SF "vnx4si") (VNx2SF "vnx2di")
(VNx2DF "vnx2di")])
-;; Mode for vector conditional operations where the comparison has
-;; different type from the lhs.
-(define_mode_attr V_cmp_mixed [(V2SI "V2SF") (V4SI "V4SF")
-                               (V2DI "V2DF") (V2SF "V2SI")
-                               (V4SF "V4SI") (V2DF "V2DI")])
-
-(define_mode_attr v_cmp_mixed [(V2SI "v2sf") (V4SI "v4sf")
-                               (V2DI "v2df") (V2SF "v2si")
-                               (V4SF "v4si") (V2DF "v2di")])
-
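
Likewise, V_cmp_mixed and v_cmp_mixed only served the mixed-mode vcond/vcondu expanders removed from the Advanced SIMD patterns above, so they go with them.
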
;; Lower case element modes (as used in shift immediate patterns).
(define_mode_attr ve_mode [(V8QI "qi") (V16QI "qi")
(V4HI "hi") (V8HI "hi")